From 593adffae4ebdb747e51b46b113d414fe5814e21 Mon Sep 17 00:00:00 2001 From: Vincent Herlemont Date: Sun, 17 Dec 2023 10:47:31 +0100 Subject: [PATCH] feat: total overhaul --- .all-contributorsrc | 2 +- .gitignore | 1 + 05_update.expanded.1.rs | 591 --------------- 05_update.expanded.rs | 592 --------------- Cargo.toml | 25 +- README.md | 205 +++--- benches/overhead_data_size.rs | 63 ++ cargo_publish.sh | 2 +- justfile | 1 + .../.gitignore | 0 .../Cargo.toml | 6 +- native_db_macro/README.md | 1 + native_db_macro/src/keys.rs | 139 ++++ native_db_macro/src/lib.rs | 26 + native_db_macro/src/model_attributes.rs | 96 +++ native_db_macro/src/model_native_db.rs | 153 ++++ native_db_macro/src/native_db.rs | 70 ++ native_db_macro/src/struct_name.rs | 13 + renovate.json | 13 +- src/builder.rs | 319 +++++++- src/common.rs | 11 - src/database.rs | 165 +++++ src/db.rs | 465 ------------ src/db_type/error.rs | 60 ++ src/db_type/input.rs | 67 ++ src/db_type/key/inner_key_value.rs | 377 ++++++++++ src/db_type/key/key_definition.rs | 73 ++ src/db_type/key/key_value.rs | 7 + src/db_type/key/mod.rs | 7 + src/db_type/mod.rs | 11 + src/db_type/output.rs | 26 + src/db_type/result.rs | 3 + src/item.rs | 55 -- src/iterator/mod.rs | 9 - src/iterator/primary_iterator.rs | 31 - src/iterator/primary_iterator_start_with.rs | 30 - src/iterator/secondary_iterator.rs | 51 -- src/iterator/secondary_iterator_start_with.rs | 42 -- src/lib.rs | 197 +---- src/model.rs | 38 + src/operation/mod.rs | 1 - src/operation/write/mod.rs | 104 --- src/query/mod.rs | 11 - src/readable_table.rs | 371 ---------- src/readonly_tables.rs | 69 -- src/readonly_transaction.rs | 18 - src/schema.rs | 7 - src/serialization.rs | 17 - src/snapshot.rs | 36 + src/stats.rs | 5 +- src/table_definition.rs | 108 +-- src/tables.rs | 357 --------- src/transaction.rs | 32 - src/transaction/internal/mod.rs | 3 + .../internal/private_readable_transaction.rs | 68 ++ src/transaction/internal/r_transaction.rs | 62 ++ 
src/transaction/internal/rw_transaction.rs | 294 ++++++++ src/transaction/mod.rs | 7 + src/transaction/query/drain.rs | 21 + src/transaction/query/get.rs | 48 ++ src/transaction/query/len.rs | 42 ++ src/transaction/query/mod.rs | 9 + src/transaction/query/scan/mod.rs | 79 ++ src/transaction/query/scan/primary_scan.rs | 112 +++ src/transaction/query/scan/secondary_scan.rs | 151 ++++ src/transaction/r_transaction.rs | 28 + src/transaction/rw_transaction.rs | 174 +++++ src/watch/batch.rs | 9 +- src/watch/event.rs | 27 +- src/watch/filter.rs | 57 +- src/watch/mod.rs | 11 +- src/watch/query/get.rs | 25 + src/watch/query/internal.rs | 106 +++ src/watch/query/mod.rs | 25 + src/watch/query/scan.rs | 78 ++ src/watch/request.rs | 22 +- src/watch/sender.rs | 45 +- struct_db_macro/README.md | 1 - struct_db_macro/src/lib.rs | 14 - struct_db_macro/src/model_attributes.rs | 39 - struct_db_macro/src/model_struct_db.rs | 131 ---- struct_db_macro/src/struct_db.rs | 57 -- tests/00_bincode.rs | 49 -- tests/01_fn_primary_key.rs | 1 - tests/02_simple_insert.rs | 35 - tests/03_simple_insert_get.rs | 107 --- tests/04_simple_len.rs | 87 --- tests/05_update.rs | 153 ---- tests/06_transaction.rs | 160 ---- tests/07_simple_multithreads.rs | 84 --- tests/08_fn_key.rs | 212 ------ tests/09_iterator.rs | 694 ------------------ tests/10_remove.rs | 58 -- tests/11_watch.rs | 462 ------------ tests/11_watch_tokio.rs | 62 -- tests/12_migration.rs | 106 --- tests/13_util.rs | 40 - tests/convert_all.rs | 89 +++ tests/macro_def/mod.rs | 7 +- tests/macro_def/only_primary_key.rs | 28 - tests/macro_def/primary_key.rs | 28 + tests/macro_def/primary_key_attribute.rs | 24 + tests/macro_def/secondary_key.rs | 156 ++++ tests/macro_def/secondary_key_attribute.rs | 174 +++++ tests/macro_def/secondary_key_mix.rs | 61 ++ tests/macro_def/with_secondary_keys.rs | 43 -- tests/migrate/only_primary_key.rs | 62 +- tests/migrate/with_secondary_keys.rs | 237 ++++-- tests/modules.rs | 5 +- tests/{14_native_model.rs => 
native_model.rs} | 17 +- tests/primary_drain/only_primary_key.rs | 97 +-- tests/primary_drain/with_secondary_keys.rs | 158 ++-- tests/query/insert_get_pk.rs | 60 ++ tests/query/insert_get_sk.rs | 155 ++++ tests/query/insert_len_pk.rs | 48 ++ tests/query/insert_remove_pk.rs | 45 ++ tests/query/insert_remove_sk.rs | 113 +++ tests/query/insert_update_pk.rs | 62 ++ tests/query/insert_update_sk.rs | 78 ++ tests/query/mod.rs | 7 + tests/scan.rs | 586 +++++++++++++++ tests/simple_multithreads.rs | 69 ++ tests/skeptic.rs | 13 +- tests/snapshot.rs | 15 + tests/tests.rs | 6 - tests/transaction.rs | 114 +++ tests/util.rs | 40 + tests/watch/mod.rs | 456 ++++++++++++ tests/watch/watch_optional.rs | 127 ++++ tests/watch_tokio.rs | 44 ++ version_update.sh | 12 +- 131 files changed, 6209 insertions(+), 6261 deletions(-) delete mode 100644 05_update.expanded.1.rs delete mode 100644 05_update.expanded.rs create mode 100644 benches/overhead_data_size.rs rename {struct_db_macro => native_db_macro}/.gitignore (100%) rename {struct_db_macro => native_db_macro}/Cargo.toml (69%) create mode 100644 native_db_macro/README.md create mode 100644 native_db_macro/src/keys.rs create mode 100644 native_db_macro/src/lib.rs create mode 100644 native_db_macro/src/model_attributes.rs create mode 100644 native_db_macro/src/model_native_db.rs create mode 100644 native_db_macro/src/native_db.rs create mode 100644 native_db_macro/src/struct_name.rs delete mode 100644 src/common.rs create mode 100644 src/database.rs delete mode 100644 src/db.rs create mode 100644 src/db_type/error.rs create mode 100644 src/db_type/input.rs create mode 100644 src/db_type/key/inner_key_value.rs create mode 100644 src/db_type/key/key_definition.rs create mode 100644 src/db_type/key/key_value.rs create mode 100644 src/db_type/key/mod.rs create mode 100644 src/db_type/mod.rs create mode 100644 src/db_type/output.rs create mode 100644 src/db_type/result.rs delete mode 100644 src/item.rs delete mode 100644 src/iterator/mod.rs 
delete mode 100644 src/iterator/primary_iterator.rs delete mode 100644 src/iterator/primary_iterator_start_with.rs delete mode 100644 src/iterator/secondary_iterator.rs delete mode 100644 src/iterator/secondary_iterator_start_with.rs create mode 100644 src/model.rs delete mode 100644 src/operation/mod.rs delete mode 100644 src/operation/write/mod.rs delete mode 100644 src/query/mod.rs delete mode 100644 src/readable_table.rs delete mode 100644 src/readonly_tables.rs delete mode 100644 src/readonly_transaction.rs delete mode 100644 src/schema.rs create mode 100644 src/snapshot.rs delete mode 100644 src/tables.rs delete mode 100644 src/transaction.rs create mode 100644 src/transaction/internal/mod.rs create mode 100644 src/transaction/internal/private_readable_transaction.rs create mode 100644 src/transaction/internal/r_transaction.rs create mode 100644 src/transaction/internal/rw_transaction.rs create mode 100644 src/transaction/mod.rs create mode 100644 src/transaction/query/drain.rs create mode 100644 src/transaction/query/get.rs create mode 100644 src/transaction/query/len.rs create mode 100644 src/transaction/query/mod.rs create mode 100644 src/transaction/query/scan/mod.rs create mode 100644 src/transaction/query/scan/primary_scan.rs create mode 100644 src/transaction/query/scan/secondary_scan.rs create mode 100644 src/transaction/r_transaction.rs create mode 100644 src/transaction/rw_transaction.rs create mode 100644 src/watch/query/get.rs create mode 100644 src/watch/query/internal.rs create mode 100644 src/watch/query/mod.rs create mode 100644 src/watch/query/scan.rs delete mode 100644 struct_db_macro/README.md delete mode 100644 struct_db_macro/src/lib.rs delete mode 100644 struct_db_macro/src/model_attributes.rs delete mode 100644 struct_db_macro/src/model_struct_db.rs delete mode 100644 struct_db_macro/src/struct_db.rs delete mode 100644 tests/00_bincode.rs delete mode 100644 tests/01_fn_primary_key.rs delete mode 100644 tests/02_simple_insert.rs delete 
mode 100644 tests/03_simple_insert_get.rs delete mode 100644 tests/04_simple_len.rs delete mode 100644 tests/05_update.rs delete mode 100644 tests/06_transaction.rs delete mode 100644 tests/07_simple_multithreads.rs delete mode 100644 tests/08_fn_key.rs delete mode 100644 tests/09_iterator.rs delete mode 100644 tests/10_remove.rs delete mode 100644 tests/11_watch.rs delete mode 100644 tests/11_watch_tokio.rs delete mode 100644 tests/12_migration.rs delete mode 100644 tests/13_util.rs create mode 100644 tests/convert_all.rs delete mode 100644 tests/macro_def/only_primary_key.rs create mode 100644 tests/macro_def/primary_key.rs create mode 100644 tests/macro_def/primary_key_attribute.rs create mode 100644 tests/macro_def/secondary_key.rs create mode 100644 tests/macro_def/secondary_key_attribute.rs create mode 100644 tests/macro_def/secondary_key_mix.rs delete mode 100644 tests/macro_def/with_secondary_keys.rs rename tests/{14_native_model.rs => native_model.rs} (84%) create mode 100644 tests/query/insert_get_pk.rs create mode 100644 tests/query/insert_get_sk.rs create mode 100644 tests/query/insert_len_pk.rs create mode 100644 tests/query/insert_remove_pk.rs create mode 100644 tests/query/insert_remove_sk.rs create mode 100644 tests/query/insert_update_pk.rs create mode 100644 tests/query/insert_update_sk.rs create mode 100644 tests/query/mod.rs create mode 100644 tests/scan.rs create mode 100644 tests/simple_multithreads.rs create mode 100644 tests/snapshot.rs delete mode 100644 tests/tests.rs create mode 100644 tests/transaction.rs create mode 100644 tests/util.rs create mode 100644 tests/watch/mod.rs create mode 100644 tests/watch/watch_optional.rs create mode 100644 tests/watch_tokio.rs diff --git a/.all-contributorsrc b/.all-contributorsrc index 66f90ade..980b367b 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -21,6 +21,6 @@ "skipCi": true, "repoType": "github", "repoHost": "https://github.com", - "projectName": "struct_db", + "projectName": 
"native_db", "projectOwner": "vincent-herlemont" } diff --git a/.gitignore b/.gitignore index 09ee29c9..6ea260c6 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ target/ node_modules/ package-lock.json package.json +*.expanded.rs # Related to [Why do binaries have Cargo.lock in version control, but not libraries?](https://doc.rust-lang.org/cargo/faq.html#why-do-binaries-have-cargolock-in-version-control-but-not-libraries) Cargo.lock \ No newline at end of file diff --git a/05_update.expanded.1.rs b/05_update.expanded.1.rs deleted file mode 100644 index 996e5d55..00000000 --- a/05_update.expanded.1.rs +++ /dev/null @@ -1,591 +0,0 @@ -#![feature(prelude_import)] -#![cfg(not(feature = "native_model"))] -#[prelude_import] -use std::prelude::rust_2018::*; -#[macro_use] -extern crate std; -mod tests { - use shortcut_assert_fs::TmpFs; - #[allow(dead_code)] - pub fn init() -> TmpFs { - TmpFs::new().unwrap() - } -} -use serde::{Deserialize, Serialize}; -use struct_db::*; -struct Item(u32); -impl Item { - fn is_native_model() -> bool { - false - } -} -impl struct_db::SDBItem for Item { - fn struct_db_bincode_encode_to_vec(&self) -> Vec { - struct_db::bincode_encode_to_vec(self).expect("Failed to serialize the struct #struct_name") - } - fn struct_db_bincode_decode_from_slice(slice: &[u8]) -> Self { - struct_db::bincode_decode_from_slice(slice) - .expect("Failed to deserialize the struct #struct_name") - .0 - } - fn struct_db_schema() -> struct_db::Schema { - let mut secondary_tables_name = std::collections::HashSet::new(); - struct_db::Schema { - table_name: "item", - primary_key: "p_key", - secondary_tables_name: secondary_tables_name, - } - } - fn struct_db_pk(&self) -> Vec { - self.p_key() - } - fn struct_db_gks(&self) -> std::collections::HashMap<&'static str, Vec> { - let mut secondary_tables_name = std::collections::HashMap::new(); - secondary_tables_name - } -} -/// Index selection Enum for [#struct_name] -pub(crate) enum ItemKey {} -impl 
struct_db::KeyDefinition for ItemKey { - fn secondary_table_name(&self) -> &'static str { - match self { - _ => { - ::std::rt::begin_panic("Unknown key"); - } - } - } -} -#[doc(hidden)] -#[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] -const _: () = { - #[allow(unused_extern_crates, clippy::useless_attribute)] - extern crate serde as _serde; - #[automatically_derived] - impl _serde::Serialize for Item { - fn serialize<__S>( - &self, - __serializer: __S, - ) -> _serde::__private::Result<__S::Ok, __S::Error> - where - __S: _serde::Serializer, - { - _serde::Serializer::serialize_newtype_struct(__serializer, "Item", &self.0) - } - } -}; -#[doc(hidden)] -#[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] -const _: () = { - #[allow(unused_extern_crates, clippy::useless_attribute)] - extern crate serde as _serde; - #[automatically_derived] - impl<'de> _serde::Deserialize<'de> for Item { - fn deserialize<__D>(__deserializer: __D) -> _serde::__private::Result - where - __D: _serde::Deserializer<'de>, - { - #[doc(hidden)] - struct __Visitor<'de> { - marker: _serde::__private::PhantomData, - lifetime: _serde::__private::PhantomData<&'de ()>, - } - impl<'de> _serde::de::Visitor<'de> for __Visitor<'de> { - type Value = Item; - fn expecting( - &self, - __formatter: &mut _serde::__private::Formatter, - ) -> _serde::__private::fmt::Result { - _serde::__private::Formatter::write_str(__formatter, "tuple struct Item") - } - #[inline] - fn visit_newtype_struct<__E>( - self, - __e: __E, - ) -> _serde::__private::Result - where - __E: _serde::Deserializer<'de>, - { - let __field0: u32 = ::deserialize(__e)?; - _serde::__private::Ok(Item(__field0)) - } - #[inline] - fn visit_seq<__A>( - self, - mut __seq: __A, - ) -> _serde::__private::Result - where - __A: _serde::de::SeqAccess<'de>, - { - let __field0 = match _serde::de::SeqAccess::next_element::(&mut __seq)? 
{ - _serde::__private::Some(__value) => __value, - _serde::__private::None => { - return _serde::__private::Err(_serde::de::Error::invalid_length( - 0usize, - &"tuple struct Item with 1 element", - )); - } - }; - _serde::__private::Ok(Item(__field0)) - } - } - _serde::Deserializer::deserialize_newtype_struct( - __deserializer, - "Item", - __Visitor { - marker: _serde::__private::PhantomData::, - lifetime: _serde::__private::PhantomData, - }, - ) - } - } -}; -#[automatically_derived] -impl ::core::marker::StructuralEq for Item {} -#[automatically_derived] -impl ::core::cmp::Eq for Item { - #[inline] - #[doc(hidden)] - #[no_coverage] - fn assert_receiver_is_total_eq(&self) -> () { - let _: ::core::cmp::AssertParamIsEq; - } -} -#[automatically_derived] -impl ::core::marker::StructuralPartialEq for Item {} -#[automatically_derived] -impl ::core::cmp::PartialEq for Item { - #[inline] - fn eq(&self, other: &Item) -> bool { - self.0 == other.0 - } -} -#[automatically_derived] -impl ::core::fmt::Debug for Item { - fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Item", &&self.0) - } -} -#[automatically_derived] -impl ::core::clone::Clone for Item { - #[inline] - fn clone(&self) -> Item { - Item(::core::clone::Clone::clone(&self.0)) - } -} -impl Item { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } -} -extern crate test; -#[cfg(test)] -#[rustc_test_marker = "update"] -pub const update: test::TestDescAndFn = test::TestDescAndFn { - desc: test::TestDesc { - name: test::StaticTestName("update"), - ignore: false, - ignore_message: ::core::option::Option::None, - source_file: "tests/05_update.rs", - start_line: 18usize, - start_col: 4usize, - end_line: 18usize, - end_col: 10usize, - compile_fail: false, - no_run: false, - should_panic: test::ShouldPanic::No, - test_type: test::TestType::IntegrationTest, - }, - testfn: test::StaticTestFn(|| test::assert_test_result(update())), -}; 
-fn update() { - let tf = tests::init(); - let o_v1 = Item(1); - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::(); - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, o_v1.clone()).unwrap(); - } - tx.commit().unwrap(); - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap().unwrap(); - match (&o_v1, &o2) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let o_v2 = Item(2); - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.update(&tx, o_v1.clone(), o_v2.clone()).unwrap(); - } - tx.commit().unwrap(); - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap(); - match (&o2, &None) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item = tables.primary_get(&tx_r, &o_v2.p_key()).unwrap().unwrap(); - match (&o_v2, &o2) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } -} -struct Item1K(u32, String); -impl Item1K { - fn is_native_model() -> bool { - false - } -} -impl struct_db::SDBItem for Item1K { - fn struct_db_bincode_encode_to_vec(&self) -> Vec { - struct_db::bincode_encode_to_vec(self).expect("Failed to serialize the struct 
#struct_name") - } - fn struct_db_bincode_decode_from_slice(slice: &[u8]) -> Self { - struct_db::bincode_decode_from_slice(slice) - .expect("Failed to deserialize the struct #struct_name") - .0 - } - fn struct_db_schema() -> struct_db::Schema { - let mut secondary_tables_name = std::collections::HashSet::new(); - secondary_tables_name.insert("item1k_s_key"); - struct_db::Schema { - table_name: "item1k", - primary_key: "p_key", - secondary_tables_name: secondary_tables_name, - } - } - fn struct_db_pk(&self) -> Vec { - self.p_key() - } - fn struct_db_gks(&self) -> std::collections::HashMap<&'static str, Vec> { - let mut secondary_tables_name = std::collections::HashMap::new(); - secondary_tables_name.insert("item1k_s_key", self.s_key()); - secondary_tables_name - } -} -/// Index selection Enum for [#struct_name] -pub(crate) enum Item1KKey { - s_key, -} -impl struct_db::KeyDefinition for Item1KKey { - fn secondary_table_name(&self) -> &'static str { - match self { - _ => { - ::std::rt::begin_panic("Unknown key"); - } - } - } -} -#[doc(hidden)] -#[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] -const _: () = { - #[allow(unused_extern_crates, clippy::useless_attribute)] - extern crate serde as _serde; - #[automatically_derived] - impl _serde::Serialize for Item1K { - fn serialize<__S>( - &self, - __serializer: __S, - ) -> _serde::__private::Result<__S::Ok, __S::Error> - where - __S: _serde::Serializer, - { - let mut __serde_state = - _serde::Serializer::serialize_tuple_struct(__serializer, "Item1K", 0 + 1 + 1)?; - _serde::ser::SerializeTupleStruct::serialize_field(&mut __serde_state, &self.0)?; - _serde::ser::SerializeTupleStruct::serialize_field(&mut __serde_state, &self.1)?; - _serde::ser::SerializeTupleStruct::end(__serde_state) - } - } -}; -#[doc(hidden)] -#[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] -const _: () = { - #[allow(unused_extern_crates, clippy::useless_attribute)] - extern crate serde as _serde; - 
#[automatically_derived] - impl<'de> _serde::Deserialize<'de> for Item1K { - fn deserialize<__D>(__deserializer: __D) -> _serde::__private::Result - where - __D: _serde::Deserializer<'de>, - { - #[doc(hidden)] - struct __Visitor<'de> { - marker: _serde::__private::PhantomData, - lifetime: _serde::__private::PhantomData<&'de ()>, - } - impl<'de> _serde::de::Visitor<'de> for __Visitor<'de> { - type Value = Item1K; - fn expecting( - &self, - __formatter: &mut _serde::__private::Formatter, - ) -> _serde::__private::fmt::Result { - _serde::__private::Formatter::write_str(__formatter, "tuple struct Item1K") - } - #[inline] - fn visit_seq<__A>( - self, - mut __seq: __A, - ) -> _serde::__private::Result - where - __A: _serde::de::SeqAccess<'de>, - { - let __field0 = match _serde::de::SeqAccess::next_element::(&mut __seq)? { - _serde::__private::Some(__value) => __value, - _serde::__private::None => { - return _serde::__private::Err(_serde::de::Error::invalid_length( - 0usize, - &"tuple struct Item1K with 2 elements", - )); - } - }; - let __field1 = match _serde::de::SeqAccess::next_element::(&mut __seq)? 
- { - _serde::__private::Some(__value) => __value, - _serde::__private::None => { - return _serde::__private::Err(_serde::de::Error::invalid_length( - 1usize, - &"tuple struct Item1K with 2 elements", - )); - } - }; - _serde::__private::Ok(Item1K(__field0, __field1)) - } - } - _serde::Deserializer::deserialize_tuple_struct( - __deserializer, - "Item1K", - 2usize, - __Visitor { - marker: _serde::__private::PhantomData::, - lifetime: _serde::__private::PhantomData, - }, - ) - } - } -}; -#[automatically_derived] -impl ::core::marker::StructuralEq for Item1K {} -#[automatically_derived] -impl ::core::cmp::Eq for Item1K { - #[inline] - #[doc(hidden)] - #[no_coverage] - fn assert_receiver_is_total_eq(&self) -> () { - let _: ::core::cmp::AssertParamIsEq; - let _: ::core::cmp::AssertParamIsEq; - } -} -#[automatically_derived] -impl ::core::marker::StructuralPartialEq for Item1K {} -#[automatically_derived] -impl ::core::cmp::PartialEq for Item1K { - #[inline] - fn eq(&self, other: &Item1K) -> bool { - self.0 == other.0 && self.1 == other.1 - } -} -#[automatically_derived] -impl ::core::fmt::Debug for Item1K { - fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - ::core::fmt::Formatter::debug_tuple_field2_finish(f, "Item1K", &self.0, &&self.1) - } -} -#[automatically_derived] -impl ::core::clone::Clone for Item1K { - #[inline] - fn clone(&self) -> Item1K { - Item1K( - ::core::clone::Clone::clone(&self.0), - ::core::clone::Clone::clone(&self.1), - ) - } -} -impl Item1K { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } - pub fn s_key(&self) -> Vec { - self.1.as_bytes().to_vec() - } -} -extern crate test; -#[cfg(test)] -#[rustc_test_marker = "update_1k"] -pub const update_1k: test::TestDescAndFn = test::TestDescAndFn { - desc: test::TestDesc { - name: test::StaticTestName("update_1k"), - ignore: false, - ignore_message: ::core::option::Option::None, - source_file: "tests/05_update.rs", - start_line: 84usize, - start_col: 4usize, - 
end_line: 84usize, - end_col: 13usize, - compile_fail: false, - no_run: false, - should_panic: test::ShouldPanic::No, - test_type: test::TestType::IntegrationTest, - }, - testfn: test::StaticTestFn(|| test::assert_test_result(update_1k())), -}; -fn update_1k() { - let tf = tests::init(); - let o_v1 = Item1K(1, "1".to_string()); - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::(); - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, o_v1.clone()).unwrap(); - } - tx.commit().unwrap(); - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item1K = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap().unwrap(); - match (&o_v1, &o2) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item1K = tables - .secondary_get(&tx_r, Item1KKey::s_key, &o_v1.s_key()) - .unwrap() - .unwrap(); - match (&o_v1, &o2) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let o_v2 = Item1K(2, "2".to_string()); - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.update(&tx, o_v1.clone(), o_v2.clone()).unwrap(); - } - tx.commit().unwrap(); - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap(); - match (&o2, &None) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - 
::core::option::Option::None, - ); - } - } - }; - } - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables - .secondary_get(&tx_r, Item1KKey::s_key, &o_v1.s_key()) - .unwrap(); - match (&o2, &None) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item1K = tables.primary_get(&tx_r, &o_v2.p_key()).unwrap().unwrap(); - match (&o_v2, &o2) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } -} -#[rustc_main] -#[no_coverage] -pub fn main() -> () { - extern crate test; - test::test_main_static(&[&update, &update_1k]) -} diff --git a/05_update.expanded.rs b/05_update.expanded.rs deleted file mode 100644 index 95f73487..00000000 --- a/05_update.expanded.rs +++ /dev/null @@ -1,592 +0,0 @@ -#![feature(prelude_import)] -#![cfg(not(feature = "native_model"))] -#[prelude_import] -use std::prelude::rust_2018::*; -#[macro_use] -extern crate std; -mod tests { - use shortcut_assert_fs::TmpFs; - #[allow(dead_code)] - pub fn init() -> TmpFs { - TmpFs::new().unwrap() - } -} -use serde::{Deserialize, Serialize}; -use struct_db::*; -struct Item(u32); -impl Item { - fn is_native_model() -> bool { - false - } -} -impl struct_db::SDBItem for Item { - fn struct_db_bincode_encode_to_vec(&self) -> Vec { - struct_db::bincode_encode_to_vec(self).expect("Failed to serialize the struct #struct_name") - } - fn struct_db_bincode_decode_from_slice(slice: &[u8]) -> Self { - struct_db::bincode_decode_from_slice(slice) - .expect("Failed to deserialize the struct #struct_name") - .0 - } - 
fn struct_db_schema() -> struct_db::Schema { - let mut secondary_tables_name = std::collections::HashSet::new(); - struct_db::Schema { - table_name: "item", - primary_key: "p_key", - secondary_tables_name: secondary_tables_name, - } - } - fn struct_db_pk(&self) -> Vec { - self.p_key() - } - fn struct_db_gks(&self) -> std::collections::HashMap<&'static str, Vec> { - let mut secondary_tables_name = std::collections::HashMap::new(); - secondary_tables_name - } -} -/// Index selection Enum for [#struct_name] -pub(crate) enum ItemKey {} -impl struct_db::KeyDefinition for ItemKey { - fn secondary_table_name(&self) -> &'static str { - match self { - _ => { - ::std::rt::begin_panic("Unknown key"); - } - } - } -} -#[doc(hidden)] -#[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] -const _: () = { - #[allow(unused_extern_crates, clippy::useless_attribute)] - extern crate serde as _serde; - #[automatically_derived] - impl _serde::Serialize for Item { - fn serialize<__S>( - &self, - __serializer: __S, - ) -> _serde::__private::Result<__S::Ok, __S::Error> - where - __S: _serde::Serializer, - { - _serde::Serializer::serialize_newtype_struct(__serializer, "Item", &self.0) - } - } -}; -#[doc(hidden)] -#[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] -const _: () = { - #[allow(unused_extern_crates, clippy::useless_attribute)] - extern crate serde as _serde; - #[automatically_derived] - impl<'de> _serde::Deserialize<'de> for Item { - fn deserialize<__D>(__deserializer: __D) -> _serde::__private::Result - where - __D: _serde::Deserializer<'de>, - { - #[doc(hidden)] - struct __Visitor<'de> { - marker: _serde::__private::PhantomData, - lifetime: _serde::__private::PhantomData<&'de ()>, - } - impl<'de> _serde::de::Visitor<'de> for __Visitor<'de> { - type Value = Item; - fn expecting( - &self, - __formatter: &mut _serde::__private::Formatter, - ) -> _serde::__private::fmt::Result { - _serde::__private::Formatter::write_str(__formatter, 
"tuple struct Item") - } - #[inline] - fn visit_newtype_struct<__E>( - self, - __e: __E, - ) -> _serde::__private::Result - where - __E: _serde::Deserializer<'de>, - { - let __field0: u32 = ::deserialize(__e)?; - _serde::__private::Ok(Item(__field0)) - } - #[inline] - fn visit_seq<__A>( - self, - mut __seq: __A, - ) -> _serde::__private::Result - where - __A: _serde::de::SeqAccess<'de>, - { - let __field0 = match _serde::de::SeqAccess::next_element::(&mut __seq)? { - _serde::__private::Some(__value) => __value, - _serde::__private::None => { - return _serde::__private::Err(_serde::de::Error::invalid_length( - 0usize, - &"tuple struct Item with 1 element", - )); - } - }; - _serde::__private::Ok(Item(__field0)) - } - } - _serde::Deserializer::deserialize_newtype_struct( - __deserializer, - "Item", - __Visitor { - marker: _serde::__private::PhantomData::, - lifetime: _serde::__private::PhantomData, - }, - ) - } - } -}; -#[automatically_derived] -impl ::core::marker::StructuralEq for Item {} -#[automatically_derived] -impl ::core::cmp::Eq for Item { - #[inline] - #[doc(hidden)] - #[no_coverage] - fn assert_receiver_is_total_eq(&self) -> () { - let _: ::core::cmp::AssertParamIsEq; - } -} -#[automatically_derived] -impl ::core::marker::StructuralPartialEq for Item {} -#[automatically_derived] -impl ::core::cmp::PartialEq for Item { - #[inline] - fn eq(&self, other: &Item) -> bool { - self.0 == other.0 - } -} -#[automatically_derived] -impl ::core::fmt::Debug for Item { - fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Item", &&self.0) - } -} -#[automatically_derived] -impl ::core::clone::Clone for Item { - #[inline] - fn clone(&self) -> Item { - Item(::core::clone::Clone::clone(&self.0)) - } -} -impl Item { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } -} -extern crate test; -#[cfg(test)] -#[rustc_test_marker = "update"] -pub const update: test::TestDescAndFn = 
test::TestDescAndFn { - desc: test::TestDesc { - name: test::StaticTestName("update"), - ignore: false, - ignore_message: ::core::option::Option::None, - source_file: "tests/05_update.rs", - start_line: 18usize, - start_col: 4usize, - end_line: 18usize, - end_col: 10usize, - compile_fail: false, - no_run: false, - should_panic: test::ShouldPanic::No, - test_type: test::TestType::IntegrationTest, - }, - testfn: test::StaticTestFn(|| test::assert_test_result(update())), -}; -fn update() { - let tf = tests::init(); - let o_v1 = Item(1); - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::(); - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, o_v1.clone()).unwrap(); - } - tx.commit().unwrap(); - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap().unwrap(); - match (&o_v1, &o2) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let o_v2 = Item(2); - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.update(&tx, o_v1.clone(), o_v2.clone()).unwrap(); - } - tx.commit().unwrap(); - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap(); - match (&o2, &None) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item = tables.primary_get(&tx_r, &o_v2.p_key()).unwrap().unwrap(); - match (&o_v2, &o2) { - (left_val, right_val) => 
{ - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } -} -struct Item1K(u32, String); -impl Item1K { - fn is_native_model() -> bool { - false - } -} -impl struct_db::SDBItem for Item1K { - fn struct_db_bincode_encode_to_vec(&self) -> Vec { - struct_db::bincode_encode_to_vec(self).expect("Failed to serialize the struct #struct_name") - } - fn struct_db_bincode_decode_from_slice(slice: &[u8]) -> Self { - struct_db::bincode_decode_from_slice(slice) - .expect("Failed to deserialize the struct #struct_name") - .0 - } - fn struct_db_schema() -> struct_db::Schema { - let mut secondary_tables_name = std::collections::HashSet::new(); - secondary_tables_name.insert("item1k_s_key"); - struct_db::Schema { - table_name: "item1k", - primary_key: "p_key", - secondary_tables_name: secondary_tables_name, - } - } - fn struct_db_pk(&self) -> Vec { - self.p_key() - } - fn struct_db_gks(&self) -> std::collections::HashMap<&'static str, Vec> { - let mut secondary_tables_name = std::collections::HashMap::new(); - secondary_tables_name.insert("item1k_s_key", self.s_key()); - secondary_tables_name - } -} -/// Index selection Enum for [#struct_name] -pub(crate) enum Item1KKey { - s_key, -} -impl struct_db::KeyDefinition for Item1KKey { - fn secondary_table_name(&self) -> &'static str { - match self { - Item1KKey::s_key => "item1k_s_key", - _ => { - ::std::rt::begin_panic("Unknown key"); - } - } - } -} -#[doc(hidden)] -#[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] -const _: () = { - #[allow(unused_extern_crates, clippy::useless_attribute)] - extern crate serde as _serde; - #[automatically_derived] - impl _serde::Serialize for Item1K { - fn serialize<__S>( - &self, - __serializer: __S, - ) -> _serde::__private::Result<__S::Ok, __S::Error> - where - __S: _serde::Serializer, - { - let mut __serde_state = - 
_serde::Serializer::serialize_tuple_struct(__serializer, "Item1K", 0 + 1 + 1)?; - _serde::ser::SerializeTupleStruct::serialize_field(&mut __serde_state, &self.0)?; - _serde::ser::SerializeTupleStruct::serialize_field(&mut __serde_state, &self.1)?; - _serde::ser::SerializeTupleStruct::end(__serde_state) - } - } -}; -#[doc(hidden)] -#[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] -const _: () = { - #[allow(unused_extern_crates, clippy::useless_attribute)] - extern crate serde as _serde; - #[automatically_derived] - impl<'de> _serde::Deserialize<'de> for Item1K { - fn deserialize<__D>(__deserializer: __D) -> _serde::__private::Result - where - __D: _serde::Deserializer<'de>, - { - #[doc(hidden)] - struct __Visitor<'de> { - marker: _serde::__private::PhantomData, - lifetime: _serde::__private::PhantomData<&'de ()>, - } - impl<'de> _serde::de::Visitor<'de> for __Visitor<'de> { - type Value = Item1K; - fn expecting( - &self, - __formatter: &mut _serde::__private::Formatter, - ) -> _serde::__private::fmt::Result { - _serde::__private::Formatter::write_str(__formatter, "tuple struct Item1K") - } - #[inline] - fn visit_seq<__A>( - self, - mut __seq: __A, - ) -> _serde::__private::Result - where - __A: _serde::de::SeqAccess<'de>, - { - let __field0 = match _serde::de::SeqAccess::next_element::(&mut __seq)? { - _serde::__private::Some(__value) => __value, - _serde::__private::None => { - return _serde::__private::Err(_serde::de::Error::invalid_length( - 0usize, - &"tuple struct Item1K with 2 elements", - )); - } - }; - let __field1 = match _serde::de::SeqAccess::next_element::(&mut __seq)? 
- { - _serde::__private::Some(__value) => __value, - _serde::__private::None => { - return _serde::__private::Err(_serde::de::Error::invalid_length( - 1usize, - &"tuple struct Item1K with 2 elements", - )); - } - }; - _serde::__private::Ok(Item1K(__field0, __field1)) - } - } - _serde::Deserializer::deserialize_tuple_struct( - __deserializer, - "Item1K", - 2usize, - __Visitor { - marker: _serde::__private::PhantomData::, - lifetime: _serde::__private::PhantomData, - }, - ) - } - } -}; -#[automatically_derived] -impl ::core::marker::StructuralEq for Item1K {} -#[automatically_derived] -impl ::core::cmp::Eq for Item1K { - #[inline] - #[doc(hidden)] - #[no_coverage] - fn assert_receiver_is_total_eq(&self) -> () { - let _: ::core::cmp::AssertParamIsEq; - let _: ::core::cmp::AssertParamIsEq; - } -} -#[automatically_derived] -impl ::core::marker::StructuralPartialEq for Item1K {} -#[automatically_derived] -impl ::core::cmp::PartialEq for Item1K { - #[inline] - fn eq(&self, other: &Item1K) -> bool { - self.0 == other.0 && self.1 == other.1 - } -} -#[automatically_derived] -impl ::core::fmt::Debug for Item1K { - fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - ::core::fmt::Formatter::debug_tuple_field2_finish(f, "Item1K", &self.0, &&self.1) - } -} -#[automatically_derived] -impl ::core::clone::Clone for Item1K { - #[inline] - fn clone(&self) -> Item1K { - Item1K( - ::core::clone::Clone::clone(&self.0), - ::core::clone::Clone::clone(&self.1), - ) - } -} -impl Item1K { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } - pub fn s_key(&self) -> Vec { - self.1.as_bytes().to_vec() - } -} -extern crate test; -#[cfg(test)] -#[rustc_test_marker = "update_1k"] -pub const update_1k: test::TestDescAndFn = test::TestDescAndFn { - desc: test::TestDesc { - name: test::StaticTestName("update_1k"), - ignore: false, - ignore_message: ::core::option::Option::None, - source_file: "tests/05_update.rs", - start_line: 84usize, - start_col: 4usize, - 
end_line: 84usize, - end_col: 13usize, - compile_fail: false, - no_run: false, - should_panic: test::ShouldPanic::No, - test_type: test::TestType::IntegrationTest, - }, - testfn: test::StaticTestFn(|| test::assert_test_result(update_1k())), -}; -fn update_1k() { - let tf = tests::init(); - let o_v1 = Item1K(1, "1".to_string()); - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::(); - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, o_v1.clone()).unwrap(); - } - tx.commit().unwrap(); - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item1K = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap().unwrap(); - match (&o_v1, &o2) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item1K = tables - .secondary_get(&tx_r, Item1KKey::s_key, &o_v1.s_key()) - .unwrap() - .unwrap(); - match (&o_v1, &o2) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let o_v2 = Item1K(2, "2".to_string()); - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.update(&tx, o_v1.clone(), o_v2.clone()).unwrap(); - } - tx.commit().unwrap(); - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap(); - match (&o2, &None) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - 
::core::option::Option::None, - ); - } - } - }; - } - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables - .secondary_get(&tx_r, Item1KKey::s_key, &o_v1.s_key()) - .unwrap(); - match (&o2, &None) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item1K = tables.primary_get(&tx_r, &o_v2.p_key()).unwrap().unwrap(); - match (&o_v2, &o2) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - let kind = ::core::panicking::AssertKind::Eq; - ::core::panicking::assert_failed( - kind, - &*left_val, - &*right_val, - ::core::option::Option::None, - ); - } - } - }; - } -} -#[rustc_main] -#[no_coverage] -pub fn main() -> () { - extern crate test; - test::test_main_static(&[&update, &update_1k]) -} diff --git a/Cargo.toml b/Cargo.toml index 87cec1f0..7dd0075a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,31 +1,30 @@ [package] -name = "struct_db" +name = "native_db" version = "0.4.3" authors = ["Vincent Herlemont "] edition = "2018" description = "Drop-in embedded database" license = "MIT" -repository = "https://github.com/vincent-herlemont/struct_db" +repository = "https://github.com/vincent-herlemont/native_db" readme = "README.md" build = "build.rs" keywords = ["embedded", "database", "multi-platform", "android", "ios"] categories = ["database-implementations", "concurrency", "data-structures", "caching", "algorithms"] [workspace] -members = ["struct_db_macro"] +members = ["native_db_macro"] [dependencies] -redb = "1.3.0" -struct_db_macro = { version = "0.4.3", path = "struct_db_macro" } +redb = "1.4.0" +native_db_macro = { version = "0.4.3", path = "native_db_macro" } thiserror = "1.0" - -# TODO: make it optional when custom 
serialization is implemented -serde = { version = "1.0", features = ["derive"] } -bincode = { version = "2.0.0-rc.3", features = ["serde"] } +uuid = { version = "1", features = [ "v4"] } +serde = { version = "1.0" } # Optional tokio support tokio = { version = "1", features = ["sync"], optional = true } -native_model = { git = "https://github.com/vincent-herlemont/native_model.git", optional = true, rev = "69d42f5ec9a4ad671525233e2835efeaae95862e" } +#native_model = { git = "https://github.com/vincent-herlemont/native_model.git", rev = "69d42f5ec9a4ad671525233e2835efeaae95862e" } +native_model = { path = "../native_model" } [dev-dependencies] assert_fs = "1.0" @@ -33,9 +32,15 @@ serial_test = { version = "2.0", features = ["file_locks"] } shortcut_assert_fs = { version = "0.1.0" } skeptic = "0.13" tokio = { version = "1.33", features = ["test-util","macros"] } +bincode = { version = "2.0.0-rc.3", features = ["serde"] } +criterion = { version = "0.5.1" } [features] default = [] +[[bench]] +name = "overhead_data_size" +harness = false + [build-dependencies] skeptic = "0.13" diff --git a/README.md b/README.md index 492238ec..01dd9089 100644 --- a/README.md +++ b/README.md @@ -1,118 +1,139 @@ -# Struct DB 🔧🔩 +# Native DB 🔧🔩 -[![Crates.io](https://img.shields.io/crates/v/struct_db)](https://crates.io/crates/struct_db) -[![Linux/Windows/macOS/Android/iOS (Build/Test/Release)](https://github.com/vincent-herlemont/struct_db/actions/workflows/build_and_test_release.yml/badge.svg)](https://github.com/vincent-herlemont/struct_db/actions/workflows/build_and_test_release.yml) -[![Documentation](https://docs.rs/struct_db/badge.svg)](https://docs.rs/struct_db) -[![License](https://img.shields.io/crates/l/struct_db)](LICENSE) +[![Crates.io](https://img.shields.io/crates/v/native_db)](https://crates.io/crates/native_db) +[![Linux/Windows/macOS/Android/iOS 
(Build/Test/Release)](https://github.com/vincent-herlemont/native_db/actions/workflows/build_and_test_release.yml/badge.svg)](https://github.com/vincent-herlemont/native_db/actions/workflows/build_and_test_release.yml) +[![Documentation](https://docs.rs/native_db/badge.svg)](https://docs.rs/native_db) +[![License](https://img.shields.io/crates/l/native_db)](LICENSE) [![All Contributors](https://img.shields.io/badge/all_contributors-1-orange.svg)](#contributors-) -Here's a drop-in, fast, embedded database solution based on [redb](https://github.com/cberner/redb) for -multi-platform applications (server, desktop, mobile). -It's focused on maintaining coherence between Rust types and stored data with minimal boilerplate. -It supports multiple indexes, real-time watch with filters, schema migration. Enjoy! 😌🍃. +Here's a drop-in, fast, embedded database solution for multi-platform applications (server, desktop, mobile). It's focused on maintaining coherence between Rust types and stored data with minimal boilerplate. Enjoy! 😌🍃. # Features -- Almost as fast as the storage engine [redb](https://github.com/cberner/redb). -- Embedded database (Linux, macOS, Windows, Android, iOS). -- Support multiple indexes ([unique secondary keys](https://docs.rs/struct_db/latest/struct_db/trait.ReadableTable.html#method.secondary_get)). +- Simple API. +- Support for multiple indexes (primary, secondary, unique, non-unique, optional). +- Minimal boilerplate. +- Transparent serialization/deserialization using [native_model](https://crates.io/crates/native_model). +- Automatic model migration. +- Thread-safe and fully ACID-compliant transactions provided by [redb](https://github.com/cberner/redb). +- Real-time subscription with filters for `insert`, `update` and `delete` operations. - Compatible with all Rust types (`enum`, `struct`, `tuple` etc.). -- [Query data](https://docs.rs/struct_db/latest/struct_db/trait.ReadableTable.html#method.primary_get) (`get`, `watch`, `iter` etc.) 
using explicit type or type inference. -- [Real-time subscription](https://docs.rs/struct_db/latest/struct_db/struct.Db.html#method.primary_watch) with filters for `insert`, `update` and `delete` operations. -- [Schema migration](https://docs.rs/struct_db/latest/struct_db/struct.Tables.html#method.migrate) using native Rust coercion. -- Fully ACID-compliant transactions. -- _Add your own serialization/deserialization logic [planned*](#roadmap) (e.g: zero-copy)._ -- Thread-safe. +- Hot snapshots. -# Status +# Installation + +Add this to your `Cargo.toml`: +```toml +[dependencies] +native_db = "0.4.3" +native_model = "0.3.30" +``` + +NOTE: `native_db` requires `native_model` to work. -Early development. Not ready for production. Follow the [roadmap](#roadmap) for the 1.0 release. +# Status -# How to use? +Active development. The API is not stable yet and may change in the future. + +# Usual API +- [**DatabaseBuilder**](https://docs.rs/native_db/latest/native_db/struct.DatabaseBuilder.html) + - [**define**](https://docs.rs/native_db/latest/native_db/struct.DatabaseBuilder.html#method.define) a model. + - [**create**](https://docs.rs/native_db/latest/native_db/struct.DatabaseBuilder.html#method.create) / [**open**](https://docs.rs/native_db/latest/native_db/struct.DatabaseBuilder.html#method.open) a database. + - [**create_in_memory**](https://docs.rs/native_db/latest/native_db/struct.DatabaseBuilder.html#method.create_in_memory) an in-memory database. +- [**Database**](https://docs.rs/native_db/latest/native_db/struct.Database.html) + - [**snapshot**](https://docs.rs/native_db/latest/native_db/struct.Database.html#method.snapshot) the database. + - **rw_transaction** open a read-write transaction. + - [**insert**](https://docs.rs/native_db/latest/native_db/native_db/transaction/struct.RwTransaction.html#method.insert) a new item. + - [**update**](https://docs.rs/native_db/latest/native_db/native_db/transaction/struct.RwTransaction.html#method.update) an existing item. 
+ - [**remove**](https://docs.rs/native_db/latest/native_db/native_db/transaction/struct.RwTransaction.html#method.remove) an existing item. + - [**commit**](https://docs.rs/native_db/latest/native_db/native_db/transaction/struct.RwTransaction.html#method.commit) the transaction. + - [**min**](https://docs.rs/native_db/latest/native_db/native_db/transaction/struct.RwTransaction.html#method.min) the minimum primary key. + - plus all read-only transaction APIs. + - **r_transaction** open a read-only transaction. + - **get** + - [**primary**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.RGet.html#method.primary) an item by its primary key. + - [**secondary**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.RGet.html#method.secondary) an item by its secondary key. + - **scan** + - **primary** + - [**all**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.PrimaryScan.html#method.all) items. + - [**start_with**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.PrimaryScan.html#method.start_with) items with a primary key starting with a given value. + - [**range**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.PrimaryScan.html#method.range) items with a primary key in a given range. + - **secondary** + - [**all**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.SecondaryScan.html#method.all) items with a given secondary key. + - [**start_with**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.SecondaryScan.html#method.start_with) items with a secondary key starting with a given value. + - [**range**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.SecondaryScan.html#method.range) items with a secondary key in a given range. + - **len** + - [**primary**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.RLen.html#method.primary) the number of items. 
+ - [**secondary**](https://docs.rs/native_db/latest/native_db/transaction/query/struct.RLen.html#method.secondary) the number of items with a given secondary key. + - **watch** real-time subscriptions via [std channel](https://doc.rust-lang.org/std/sync/mpsc/fn.channel.html) based or [tokio channel](https://docs.rs/tokio/latest/tokio/sync/mpsc/fn.unbounded_channel.html) based depending on the feature `tokio`. + - **get** + - [**primary**](https://docs.rs/native_db/latest/native_db/watch/query/struct.WatchGet.html#method.primary) an item by its primary key. + - [**secondary**](https://docs.rs/native_db/latest/native_db/watch/query/struct.WatchGet.html#method.secondary) an item by its secondary key. + - **scan** + - **primary** + - [**all**](https://docs.rs/native_db/latest/native_db/watch/query/struct.WatchScanPrimary.html#method.all) items. + - [**start_with**](https://docs.rs/native_db/latest/native_db/watch/query/struct.WatchScanPrimary.html#method.start_with) items with a primary key starting with a given value. + - [**range**](https://docs.rs/native_db/latest/native_db/watch/query/struct.WatchScanPrimary.html#method.range) items with a primary key in a given range. + - **secondary** + - [**all**](https://docs.rs/native_db/latest/native_db/watch/query/struct.WatchScanSecondary.html#method.all) items with a given secondary key. + - [**start_with**](https://docs.rs/native_db/latest/native_db/watch/query/struct.WatchScanSecondary.html#method.start_with) items with a secondary key starting with a given value. + - [**range**](https://docs.rs/native_db/latest/native_db/watch/query/struct.WatchScanSecondary.html#method.range) items with a secondary key in a given range. -See [docs.rs](https://docs.rs/struct_db/latest/struct_db/). 
# Example ```rust use serde::{Deserialize, Serialize}; -use struct_db::*; +use native_db::*; +use native_model::{native_model, Model}; #[derive(Serialize, Deserialize, PartialEq, Debug)] -#[struct_db( - pk = p_key, // required - gk = s_key, // optional - // ... other gk ... -)] -struct Data(u32, String); - -impl Data { - // Returns primary key as big-endian bytes for consistent lexicographical ordering. - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } - - // Generates a secondary key combining the String field and the big-endian bytes of - // the primary key for versatile queries. - pub fn s_key(&self) -> Vec { - let mut s_key = self.1.as_bytes().to_vec(); - s_key.extend_from_slice(&self.p_key().as_slice()); - s_key - } - } - - fn main() { - let mut db = Db::init_tmp("my_db_example").unwrap(); - // Initialize the schema - db.define::(); - - // Insert data - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, Data(1,"red".to_string())).unwrap(); - tables.insert(&txn, Data(2,"red".to_string())).unwrap(); - tables.insert(&txn, Data(3,"blue".to_string())).unwrap(); - } - txn.commit().unwrap(); - - let txn_read = db.read_transaction().unwrap(); - let mut tables = txn_read.tables(); - - // Retrieve data with p_key=3 - let retrieve_data: Data = tables.primary_get(&txn_read, &3_u32.to_be_bytes()).unwrap().unwrap(); - println!("data p_key='3' : {:?}", retrieve_data); - - // Iterate data with s_key="red" String - for item in tables.secondary_iter_start_with::(&txn_read, DataKey::s_key, "red".as_bytes()).unwrap() { - println!("data s_key='1': {:?}", item); - } - - // Remove data - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.remove(&txn, retrieve_data).unwrap(); - } - txn.commit().unwrap(); - } +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, + #[secondary_key] + name: String, +} + +fn main() -> Result<(), db_type::Error> { + let 
mut builder = DatabaseBuilder::new(); + // Initialize the model + builder.define::()?; + + // Create a database in memory + let mut db = builder.create_in_memory()?; + + // Insert data (open a read-write transaction) + let rw = db.rw_transaction().unwrap(); + rw.insert(Item { id: 1, name: "red".to_string() })?; + rw.insert(Item { id: 2, name: "green".to_string() })?; + rw.insert(Item { id: 3, name: "blue".to_string() })?; + rw.commit()?; + + // Open a read-only transaction + let r = db.r_transaction()?; + // Retrieve data with id=3 + let retrieve_data: Item = r.get().primary(3_u32)?.unwrap(); + println!("data id='3': {:?}", retrieve_data); + // Iterate items with name starting with "red" + for item in r.scan().secondary::(ItemKey::name)?.start_with("red") { + println!("data name=\"red\": {:?}", item); + } + + // Remove data (open a read-write transaction) + let rw = db.rw_transaction()?; + rw.remove(retrieve_data)?; + rw.commit()?; + Ok(()) +} ``` -# Roadmap - -The following features are planned before the 1.0 release - -- [ ] Add benchmarks tests. -- [x] Add documentation. -- [x] Stable release of [redb](https://github.com/cberner/redb) or implement another stable storage engine(s) for Linux, macOS, Windows, Android, iOS. -- [ ] Add support for custom serialization/deserialization logic. -- [x] Add CI for Linux, macOS, Windows, Android, iOS. -- [ ] Use in a real-world project. - ## Contributors @@ -121,7 +142,7 @@ The following features are planned before the 1.0 release - +
Akshith Madhur
Akshith Madhur

💻
Akshith Madhur
Akshith Madhur

💻
diff --git a/benches/overhead_data_size.rs b/benches/overhead_data_size.rs new file mode 100644 index 00000000..03c27655 --- /dev/null +++ b/benches/overhead_data_size.rs @@ -0,0 +1,63 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use native_db::*; +use native_model::{native_model, Model}; +use redb::TableDefinition; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Clone)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Data { + #[primary_key] + x: u32, + data: Vec, +} + +const TABLE_REDB: TableDefinition = TableDefinition::new("my_data"); +fn use_redb(db: &redb::Database, data: Data) { + let rw = db.begin_write().unwrap(); + { + let mut table = rw.open_table(TABLE_REDB).unwrap(); + let encode = native_model::encode(&data).unwrap(); + table.insert(data.x, encode.as_slice()).unwrap(); + } + rw.commit().unwrap(); +} + +fn use_native_db(db: &native_db::Database, data: Data) { + let rw = db.rw_transaction().unwrap(); + rw.insert(data).unwrap(); + rw.commit().unwrap(); +} + +fn criterion_benchmark(c: &mut Criterion) { + let mut group = c.benchmark_group("insert"); + + // 1 byte, 1KB, 1MB, 10MB, 100MB + for nb_bytes in [1, 1024, 1024 * 1024, 10 * 1024 * 1024, 100 * 1024 * 1024] { + group.throughput(criterion::Throughput::Bytes(nb_bytes as u64)); + + let data = Data { + x: 1, + data: vec![1u8; nb_bytes as usize], + }; + + let redb_backend = redb::backends::InMemoryBackend::new(); + let redb_db = redb::Database::builder() + .create_with_backend(redb_backend) + .unwrap(); + + group.bench_function(BenchmarkId::new("redb", nb_bytes), |b| { + b.iter(|| use_redb(&redb_db, data.clone())) + }); + + let mut native_db = native_db::Database::create_in_memory().unwrap(); + native_db.define::().unwrap(); + group.bench_function(BenchmarkId::new("native_db", nb_bytes), |b| { + b.iter(|| use_native_db(&native_db, data.clone())) + }); + } +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); 
diff --git a/cargo_publish.sh b/cargo_publish.sh index 000bcb5b..53bed5ff 100755 --- a/cargo_publish.sh +++ b/cargo_publish.sh @@ -7,7 +7,7 @@ set -x ARG_TOKEN="--token=$CARGO_TOKEN" -cd $DIR/struct_db_macro +cd $DIR/native_db_macro cargo publish $ARG_TOKEN $@ cd $DIR diff --git a/justfile b/justfile index efa71b6c..b90b22e8 100644 --- a/justfile +++ b/justfile @@ -21,4 +21,5 @@ test_all: test_no_default test_default expand test_file_name: + rm -f {{test_file_name}}.expanded.rs; \ cargo expand --test {{test_file_name}} | save --raw {{test_file_name}}.expanded.rs \ No newline at end of file diff --git a/struct_db_macro/.gitignore b/native_db_macro/.gitignore similarity index 100% rename from struct_db_macro/.gitignore rename to native_db_macro/.gitignore diff --git a/struct_db_macro/Cargo.toml b/native_db_macro/Cargo.toml similarity index 69% rename from struct_db_macro/Cargo.toml rename to native_db_macro/Cargo.toml index 5519245f..edcaec3b 100644 --- a/struct_db_macro/Cargo.toml +++ b/native_db_macro/Cargo.toml @@ -1,11 +1,11 @@ [package] -name = "struct_db_macro" +name = "native_db_macro" version = "0.4.3" authors = ["Vincent Herlemont "] edition = "2018" -description = "A procedural macro for struct_db" +description = "A procedural macro for native_db" license = "MIT" -repository = "https://github.com/vincent-herlemont/struct_db" +repository = "https://github.com/vincent-herlemont/native_db" readme = "README.md" [lib] diff --git a/native_db_macro/README.md b/native_db_macro/README.md new file mode 100644 index 00000000..6f578d7e --- /dev/null +++ b/native_db_macro/README.md @@ -0,0 +1 @@ +A procedural macro for native_db \ No newline at end of file diff --git a/native_db_macro/src/keys.rs b/native_db_macro/src/keys.rs new file mode 100644 index 00000000..ec89a773 --- /dev/null +++ b/native_db_macro/src/keys.rs @@ -0,0 +1,139 @@ +use crate::struct_name::StructName; +use crate::ToTokenStream; +use quote::quote; +use std::hash::Hash; +use syn::Ident; + 
+#[derive(Clone)] +pub(crate) struct DatabaseKeyDefinition { + pub(super) struct_name: StructName, + field_name: Option, + function_name: Option, + pub(crate) options: O, +} + +impl PartialEq for DatabaseKeyDefinition { + fn eq(&self, other: &Self) -> bool { + self.ident() == other.ident() + } +} + +impl Eq for DatabaseKeyDefinition {} + +impl Hash for DatabaseKeyDefinition { + fn hash(&self, state: &mut H) { + self.ident().hash(state); + } +} + +impl ToTokenStream for DatabaseKeyDefinition { + fn new_to_token_stream(&self) -> proc_macro2::TokenStream { + let options = self.options.new_to_token_stream(); + let struct_name = self.struct_name.ident(); + let key_name = self.name(); + quote! { + native_db::db_type::DatabaseKeyDefinition::new(#struct_name::native_model_id(), #struct_name::native_model_version(), #key_name, #options) + } + } +} + +#[derive(Clone)] +pub(crate) struct DatabaseSecondaryKeyOptions { + pub(crate) unique: bool, + pub(crate) optional: bool, +} + +impl ToTokenStream for DatabaseSecondaryKeyOptions { + fn new_to_token_stream(&self) -> proc_macro2::TokenStream { + let unique = self.unique; + let optional = self.optional; + quote! { + native_db::db_type::DatabaseSecondaryKeyOptions { + unique: #unique, + optional: #optional, + } + } + } +} + +impl ToTokenStream for () { + fn new_to_token_stream(&self) -> proc_macro2::TokenStream { + quote! 
{()} + } +} + +impl Default for DatabaseSecondaryKeyOptions { + fn default() -> Self { + Self { + unique: false, + optional: false, + } + } +} + +impl DatabaseKeyDefinition { + pub(crate) fn name(&self) -> String { + if let Some(field_name) = &self.field_name { + field_name.to_string().to_lowercase() + } else if let Some(function_name) = &self.function_name { + function_name.to_string().to_lowercase() + } else { + panic!("Must be either field or function") + } + } + + pub(crate) fn ident(&self) -> Ident { + if self.is_field() { + self.field_name.as_ref().unwrap().clone() + } else { + self.function_name.as_ref().unwrap().clone() + } + } + + pub(crate) fn new_field(table_name: StructName, field_name: Ident, options: O) -> Self { + Self { + struct_name: table_name, + field_name: Some(field_name), + function_name: None, + options, + } + } + + pub(crate) fn set_function_name(&mut self, function_name: Ident) { + self.function_name = Some(function_name); + } + + pub(crate) fn new_empty(table_name: StructName) -> Self + where + O: Default, + { + Self { + struct_name: table_name, + field_name: None, + function_name: None, + options: O::default(), + } + } + + fn check_field_and_function(&self) { + if self.field_name.is_some() && self.function_name.is_some() { + panic!("Cannot be both field and function") + } else if self.field_name.is_none() && self.function_name.is_none() { + panic!("Must be either field or function") + } + } + + pub(crate) fn is_field(&self) -> bool { + self.check_field_and_function(); + self.field_name.is_some() + } + + pub(crate) fn is_function(&self) -> bool { + self.check_field_and_function(); + self.function_name.is_some() + } + + pub(crate) fn is_empty(&self) -> bool { + self.field_name.is_none() && self.function_name.is_none() + } +} diff --git a/native_db_macro/src/lib.rs b/native_db_macro/src/lib.rs new file mode 100644 index 00000000..87320227 --- /dev/null +++ b/native_db_macro/src/lib.rs @@ -0,0 +1,26 @@ +extern crate proc_macro; + +mod keys; 
+mod model_attributes; +mod model_native_db; +mod native_db; +mod struct_name; + +use proc_macro::TokenStream; + +use native_db::native_db as native_db_impl; + +#[proc_macro_attribute] +pub fn native_db(args: TokenStream, input: TokenStream) -> TokenStream { + native_db_impl(args, input) +} + +#[proc_macro_derive(KeyAttributes, attributes(primary_key, secondary_key))] +pub fn key_attributes(_input: TokenStream) -> TokenStream { + let gen = quote::quote! {}; + gen.into() +} + +trait ToTokenStream { + fn new_to_token_stream(&self) -> proc_macro2::TokenStream; +} diff --git a/native_db_macro/src/model_attributes.rs b/native_db_macro/src/model_attributes.rs new file mode 100644 index 00000000..a6564882 --- /dev/null +++ b/native_db_macro/src/model_attributes.rs @@ -0,0 +1,96 @@ +use crate::keys::{DatabaseKeyDefinition, DatabaseSecondaryKeyOptions}; +use crate::struct_name::StructName; +use std::collections::HashSet; +use syn::meta::ParseNestedMeta; +use syn::parse::Result; +use syn::Field; + +#[derive(Clone)] +pub(crate) struct ModelAttributes { + pub(crate) struct_name: StructName, + pub(crate) primary_key: Option>, + pub(crate) secondary_keys: HashSet>, +} + +impl ModelAttributes { + pub(crate) fn primary_key(&self) -> DatabaseKeyDefinition<()> { + self.primary_key.clone().expect("Primary key is not set") + } + + pub(crate) fn parse(&mut self, meta: ParseNestedMeta) -> Result<()> { + if meta.path.is_ident("primary_key") { + let mut key: DatabaseKeyDefinition<()> = + DatabaseKeyDefinition::new_empty(self.struct_name.clone()); + meta.parse_nested_meta(|meta| { + if key.is_empty() { + key.set_function_name(meta.path.get_ident().unwrap().clone()); + } else { + panic!( + "Unknown attribute: {}", + meta.path.get_ident().unwrap().to_string() + ); + } + Ok(()) + })?; + self.primary_key = Some(key); + } else if meta.path.is_ident("secondary_key") { + let mut key: DatabaseKeyDefinition = + DatabaseKeyDefinition::new_empty(self.struct_name.clone()); + 
meta.parse_nested_meta(|meta| { + if key.is_empty() { + key.set_function_name(meta.path.get_ident().unwrap().clone()); + } else if meta.path.is_ident("unique") { + key.options.unique = true; + } else if meta.path.is_ident("optional") { + key.options.optional = true; + } else { + panic!( + "Unknown attribute: {}", + meta.path.get_ident().unwrap().to_string() + ); + } + Ok(()) + })?; + self.secondary_keys.insert(key); + } else { + panic!( + "Unknown attribute: {}", + meta.path.get_ident().unwrap().to_string() + ); + } + Ok(()) + } + + pub(crate) fn parse_field(&mut self, field: &Field) -> Result<()> { + for attr in &field.attrs { + if attr.path().is_ident("primary_key") { + self.primary_key = Some(DatabaseKeyDefinition::new_field( + self.struct_name.clone(), + field.ident.clone().unwrap(), + (), + )); + } else if attr.path().is_ident("secondary_key") { + let mut secondary_options = DatabaseSecondaryKeyOptions::default(); + if let Ok(_) = attr.meta.require_list() { + attr.parse_nested_meta(|meta| { + if meta.path.is_ident("unique") { + secondary_options.unique = true; + } else if meta.path.is_ident("optional") { + secondary_options.optional = true; + } else { + panic!("secondary_key support only 'unique' or 'composable'"); + } + Ok(()) + })?; + } + + self.secondary_keys.insert(DatabaseKeyDefinition::new_field( + self.struct_name.clone(), + field.ident.clone().unwrap(), + secondary_options, + )); + } + } + Ok(()) + } +} diff --git a/native_db_macro/src/model_native_db.rs b/native_db_macro/src/model_native_db.rs new file mode 100644 index 00000000..91caf83f --- /dev/null +++ b/native_db_macro/src/model_native_db.rs @@ -0,0 +1,153 @@ +use crate::model_attributes::ModelAttributes; +use crate::struct_name::StructName; +use crate::ToTokenStream; +use proc_macro::Span; +use quote::quote; +use syn::Ident; + +pub(crate) struct ModelNativeDB { + struct_name: StructName, + attrs: ModelAttributes, +} + +impl ModelNativeDB { + pub fn new(struct_name: StructName, attrs: 
ModelAttributes) -> Self { + Self { struct_name, attrs } + } + + pub(crate) fn native_db_secondary_key(&self) -> proc_macro2::TokenStream { + let tokens = self + .attrs + .secondary_keys + .iter() + .map(|key| { + let key_ident = key.ident(); + let new_secondary_key = key.new_to_token_stream(); + let out = if key.is_field() { + if key.options.optional { + quote! { + let value: Option = self.#key_ident.as_ref().map(|v|v.database_inner_key_value()); + let value = native_db::db_type::DatabaseKeyValue::Optional(value); + } + } else { + quote! { + let value: native_db::db_type::DatabaseInnerKeyValue = self.#key_ident.database_inner_key_value(); + let value = native_db::db_type::DatabaseKeyValue::Default(value); + } + } + } else if key.is_function() { + if key.options.optional { + quote! { + let value: Option = self.#key_ident().map(|v|v.database_inner_key_value()); + let value = native_db::db_type::DatabaseKeyValue::Optional(value); + } + } else { + quote! { + let value: native_db::db_type::DatabaseInnerKeyValue = self.#key_ident().database_inner_key_value(); + let value = native_db::db_type::DatabaseKeyValue::Default(value); + } + } + } else { + panic!("Unknown key type") + }; + + quote! { + #out + secondary_tables_name.insert(#new_secondary_key, value); + } + }) + .collect::>(); + + quote! { + fn native_db_secondary_keys(&self) -> std::collections::HashMap, native_db::db_type::DatabaseKeyValue> { + let mut secondary_tables_name = std::collections::HashMap::new(); + #(#tokens)* + secondary_tables_name + } + } + } + + pub(crate) fn native_db_primary_key(&self) -> proc_macro2::TokenStream { + let primary_key = self.attrs.primary_key(); + let ident = primary_key.ident(); + if primary_key.is_function() { + quote! { + fn native_db_primary_key(&self) -> native_db::db_type::DatabaseInnerKeyValue { + self.#ident().database_inner_key_value() + } + } + } else { + quote! 
{ + fn native_db_primary_key(&self) -> native_db::db_type::DatabaseInnerKeyValue { + self.#ident.database_inner_key_value() + } + } + } + } + + pub(crate) fn native_db_model(&self) -> proc_macro2::TokenStream { + let primary_key = self.attrs.primary_key().new_to_token_stream(); + let secondary_keys = self + .attrs + .secondary_keys + .iter() + .map(|key| { + let new_key = key.new_to_token_stream(); + quote! { + secondary_tables_name.insert(#new_key); + } + }) + .collect::>(); + + quote! { + fn native_db_model() -> native_db::Model { + let mut secondary_tables_name = std::collections::HashSet::new(); + #(#secondary_keys)* + native_db::Model { + primary_key: #primary_key, + secondary_keys: secondary_tables_name, + } + } + } + } + + pub(crate) fn keys_enum_name(&self) -> Ident { + let struct_name = self.struct_name.ident(); + Ident::new(&format!("{}Key", struct_name), Span::call_site().into()) + } + + pub(crate) fn secondary_keys_enum(&self) -> Vec { + self.attrs + .secondary_keys + .iter() + .map(|key| { + let name = key.ident(); + quote! { + #[allow(non_camel_case_types,dead_code)] + #name + } + }) + .collect::>() + } + + pub(crate) fn keys_enum_database_key(&self) -> proc_macro2::TokenStream { + let keys_enum_name_token = self.keys_enum_name(); + + let insert_secondary_key_def = self.attrs.secondary_keys.iter().map(|key| { + let name = key.ident(); + let new_key = key.new_to_token_stream(); + quote! { + #keys_enum_name_token::#name => #new_key, + } + }); + + quote! 
{ + fn database_key(&self) -> native_db::db_type::DatabaseKeyDefinition { + match self { + #(#insert_secondary_key_def)* + _ => panic!("Unknown key"), + } + } + } + } +} diff --git a/native_db_macro/src/native_db.rs b/native_db_macro/src/native_db.rs new file mode 100644 index 00000000..0af41337 --- /dev/null +++ b/native_db_macro/src/native_db.rs @@ -0,0 +1,70 @@ +use crate::model_attributes::ModelAttributes; +use crate::model_native_db::ModelNativeDB; +use crate::struct_name::StructName; +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, Data, DeriveInput, Fields}; + +pub fn native_db(args: TokenStream, input: TokenStream) -> TokenStream { + let ast = parse_macro_input!(input as DeriveInput); + let struct_name = StructName::new(ast.ident.clone()); + + let mut attrs = ModelAttributes { + struct_name: struct_name.clone(), + primary_key: None, + secondary_keys: Default::default(), + }; + let model_attributes_parser = syn::meta::parser(|meta| attrs.parse(meta)); + parse_macro_input!(args with model_attributes_parser); + + if let Data::Struct(data_struct) = &ast.data { + if let Fields::Named(fields) = &data_struct.fields { + for field in &fields.named { + if let Err(err) = attrs.parse_field(field) { + return TokenStream::from(err.to_compile_error()); + } + } + } + } + + let model_native_db = ModelNativeDB::new(struct_name.clone(), attrs.clone()); + + let native_db_pk = model_native_db.native_db_primary_key(); + let native_db_gks = model_native_db.native_db_secondary_key(); + let native_db_model = model_native_db.native_db_model(); + + let keys_enum_name = model_native_db.keys_enum_name(); + let keys_enum = model_native_db.secondary_keys_enum(); + let keys_enum_database_key = model_native_db.keys_enum_database_key(); + + let struct_name = struct_name.ident(); + let gen = quote! 
{ + #[derive(native_db::KeyAttributes)] + #ast + + impl native_db::db_type::Input for #struct_name { + fn native_db_bincode_encode_to_vec(&self) -> Vec { + native_db::bincode_encode_to_vec(self).expect("Failed to serialize the struct #struct_name") + } + + fn native_db_bincode_decode_from_slice(slice: &[u8]) -> Self { + native_db::bincode_decode_from_slice(slice).expect("Failed to deserialize the struct #struct_name").0 + } + + #native_db_model + #native_db_pk + #native_db_gks + } + + /// Index selection Enum for [#struct_name] + pub(crate) enum #keys_enum_name { + #(#keys_enum),* + } + + impl native_db::db_type::KeyDefinition for #keys_enum_name { + #keys_enum_database_key + } + }; + + gen.into() +} diff --git a/native_db_macro/src/struct_name.rs b/native_db_macro/src/struct_name.rs new file mode 100644 index 00000000..685a9594 --- /dev/null +++ b/native_db_macro/src/struct_name.rs @@ -0,0 +1,13 @@ +use proc_macro2::Ident; + +#[derive(Clone)] +pub(crate) struct StructName(Ident); + +impl StructName { + pub(crate) fn ident(&self) -> &Ident { + &self.0 + } + pub(crate) fn new(ident: Ident) -> Self { + Self(ident) + } +} diff --git a/renovate.json b/renovate.json index 067588e4..62ddf3f1 100644 --- a/renovate.json +++ b/renovate.json @@ -1,5 +1,5 @@ { - "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": ["config:base"], "semanticCommits": "enabled", "semanticCommitType": "chore", @@ -17,5 +17,16 @@ "matchUpdateTypes": ["major", "minor", "patch"], "automerge": true } + ], + "regexManagers": [ + { + "fileMatch": ["^README\\.md$"], + "matchStrings": [ + "\"native_model\" = \"(?.*?)\"" + ], + "datasource": "crate", + "depNameTemplate": "native_model", + "versioningTemplate": "semver" + } ] } \ No newline at end of file diff --git a/src/builder.rs b/src/builder.rs index 2733829d..a424d3a4 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1,23 +1,18 @@ -use super::Result; -use 
crate::{watch, Db}; +use crate::db_type::Result; +use crate::table_definition::NativeModelOptions; +use crate::{watch, Database, Input, Model}; use std::collections::HashMap; use std::path::Path; use std::sync::atomic::AtomicU64; use std::sync::{Arc, RwLock}; -/// Builder for the [`Db`](super::Db) instance. -pub struct Builder { +/// Builder for the [`Db`](super::Database) instance. +pub struct DatabaseBuilder { cache_size_bytes: Option, + models_builder: HashMap, } -impl Builder { - /// Similar to [redb::Builder::new()](https://docs.rs/redb/latest/redb/struct.Builder.html#method.new). - pub fn new() -> Self { - Self { - cache_size_bytes: None, - } - } - +impl DatabaseBuilder { fn new_rdb_builder(&self) -> redb::Builder { let mut redb_builder = redb::Builder::new(); if let Some(cache_size_bytes) = self.cache_size_bytes { @@ -26,12 +21,28 @@ impl Builder { redb_builder } - fn new_redb(redb_database: redb::Database) -> Db { - Db { + fn init<'a>(&'a self, redb_database: redb::Database) -> Database<'a> { + let mut database = Database { instance: redb_database, primary_table_definitions: HashMap::new(), watchers: Arc::new(RwLock::new(watch::Watchers::new())), watchers_counter_id: AtomicU64::new(0), + }; + + for (_, model_builder) in &self.models_builder { + database.seed_model(&model_builder); + } + + database + } +} + +impl DatabaseBuilder { + /// Similar to [redb::Builder::new()](https://docs.rs/redb/latest/redb/struct.Builder.html#method.new). + pub fn new() -> Self { + Self { + cache_size_bytes: None, + models_builder: HashMap::new(), } } @@ -44,32 +55,272 @@ impl Builder { /// Creates a new `Db` instance using the given path. 
/// /// Similar to [redb::Builder.create(...)](https://docs.rs/redb/latest/redb/struct.Builder.html#method.create) - pub fn create(&self, path: impl AsRef) -> Result { + pub fn create(&self, path: impl AsRef) -> Result { let db = self.new_rdb_builder().create(path)?; - Ok(Self::new_redb(db)) - } - - /// Creates a new `Db` instance using [Builder::create] in order to create it in a temporary - /// directory with the given path. - /// - /// Example: `builder::create_tmp('project/my_db')` will create the db to `/tmp/project/my_db`. - pub fn create_tmp(&self, path: impl AsRef) -> Result { - let tmp_dir = std::env::temp_dir(); - let tmp_dir = tmp_dir.join(path); - self.create(tmp_dir.as_path()) + // Ok(Self::from_redb(db)) + Ok(self.init(db)) } /// Similar to [redb::Builder::open(...)](https://docs.rs/redb/latest/redb/struct.Builder.html#method.open) - pub fn open(&self, path: impl AsRef) -> Result { + pub fn open(&self, path: impl AsRef) -> Result { let db = self.new_rdb_builder().open(path)?; - Ok(Self::new_redb(db)) + // Ok(Self::from_redb(db)) + Ok(self.init(db)) } - /// Similar to [Builder::open] in order to open a database in a temporary directory with the - /// given path. - pub fn open_tmp(&self, path: impl AsRef) -> Result { - let tmp_dir = std::env::temp_dir(); - let tmp_dir = tmp_dir.join(path); - self.open(tmp_dir.as_path()) + pub fn create_in_memory(&self) -> Result { + let in_memory_backend = redb::backends::InMemoryBackend::new(); + let db = self.new_rdb_builder(); + let db = db.create_with_backend(in_memory_backend)?; + // Ok(Self::from_redb(db)) + Ok(self.init(db)) } + + /// Defines a table using the given model. + /// + /// Native DB depends of `native_model` to define the model. + /// And `native_model` by default uses [`serde`](https://serde.rs/) to serialize and deserialize the data but + /// you can use any other serialization library see the documentation of [`native_model`](https://github.com/vincent-herlemont/native_model) for more information. 
+ /// So in the example below we import `serde` and we use the `Serialize` and `Deserialize` traits. + /// + /// # Primary key + /// + /// The primary key is *strict*, you **must**: + /// - define it. + /// - define only one. + /// + /// If the primary key is not defined, the compiler will return an error `Primary key is not set`. + /// + /// You can define with two ways: + /// - `#[primary_key]` on the field + /// - `#[native_db(primary_key())]` on any type `enum`, `struct`, `tuple struct` or `unit struct`. + /// + /// By default is **unique** so you can't have two instances of the model with the same primary key saved in the database. + /// + /// ## Define a simple model with a primary key + /// ```rust + /// use native_db::*; + /// use native_model::{native_model, Model}; + /// use serde::{Deserialize, Serialize}; + /// + /// #[derive(Serialize, Deserialize)] + /// #[native_model(id=1, version=1)] + /// #[native_db] + /// struct Data { + /// #[primary_key] + /// id: u64, + /// } + /// + /// fn main() -> Result<(), db_type::Error> { + /// let mut builder = DatabaseBuilder::new(); + /// builder.define::() + /// } + /// ``` + /// ## Define a model with a method as primary key + /// ```rust + /// use native_db::*; + /// use native_model::{native_model, Model}; + /// use serde::{Deserialize, Serialize}; + /// + /// #[derive(Serialize, Deserialize)] + /// #[native_model(id=1, version=1)] + /// #[native_db( + /// primary_key(custom_id) + /// )] + /// struct Data(u64); + /// + /// impl Data { + /// fn custom_id(&self) -> u32 { + /// (self.0 + 1) as u32 + /// } + /// } + /// + /// ``` + /// + /// ## Secondary key + /// + /// The secondary key is *flexible*, you can: + /// - define it or not. + /// - define one or more. + /// + /// You can define with two ways: + /// - `#[secondary_key]` on the field + /// - `#[native_db(secondary_key(, ))]` on any type `enum`, `struct`, `tuple struct` or `unit struct`. 
+ /// + /// The secondary key can have two options: + /// - [`unique`](#unique) (default: false) + /// - [`optional`](#optional) (default: false) + /// + /// ## Define a model with a secondary key + /// ```rust + /// use native_db::*; + /// use native_model::{native_model, Model}; + /// use serde::{Deserialize, Serialize}; + /// + /// #[derive(Serialize, Deserialize)] + /// #[native_model(id=1, version=1)] + /// #[native_db] + /// struct Data { + /// #[primary_key] + /// id: u64, + /// #[secondary_key] + /// name: String, + /// } + /// ``` + /// + /// ## Define a model with a secondary key optional and unique + /// ```rust + /// use native_db::*; + /// use native_model::{native_model, Model} + /// use serde::{Deserialize, Serialize}; + /// + /// #[derive(Serialize, Deserialize)] + /// #[native_model(id=1, version=1)] + /// #[native_db] + /// struct Data { + /// #[primary_key] + /// id: u64, + /// #[secondary_key(unique, optional)] + /// name: Option, + /// } + /// ``` + /// - Note: the secondary key can be `unique` **or** `optional` as well. + /// + /// ## Unique + /// + /// This means that each instance of the model must have a unique value for the secondary key. + /// If the value is not unique, the [`insert`](crate::transaction::RwTransaction::insert) method will return an error. + /// + /// ## Optional + /// + /// This means that an instance of the model can have a value for the secondary key or not. + /// When `optional` is set the value **must** be an [`Option`](https://doc.rust-lang.org/std/option/enum.Option.html). + /// If the value is not an [`Option`](https://doc.rust-lang.org/std/option/enum.Option.html) the compiler will return + /// an error `error[E0282]: type annotations needed: cannot infer type`. + /// + /// Under the hood, the secondary key is stored in a separate redb table. So if the secondary key is optional, + /// the value will be stored in the table only if the value is not `None`. 
+ /// + /// # Define a model with a secondary key and a custom secondary key optional + /// ```rust + /// use native_db::*; + /// use native_model::{native_model, Model}; + /// use serde::{Deserialize, Serialize}; + /// + /// #[derive(Serialize, Deserialize)] + /// #[native_model(id=1, version=1)] + /// #[native_db( + /// secondary_key(custom_name, optional) + /// )] + /// struct Data { + /// #[primary_key] + /// id: u64, + /// #[secondary_key] + /// name: String, + /// flag: bool, + /// } + /// + /// impl Data { + /// fn custom_name(&self) -> Option { + /// if self.flag { + /// Some(self.name.clone().to_uppercase()) + /// } else { + /// None + /// } + /// } + /// } + /// ``` + /// # Define multiple models + /// + /// To define multiple models, you **must** use different `id` for each model. If you use the same `id` for two models, + /// the program will panic with the message `The table has the same native model version as the table and it's not allowed`. + /// + /// Example: + /// ```rust + /// use native_db::*; + /// use native_model::{native_model, Model}; + /// use serde::{Deserialize, Serialize}; + /// + /// #[derive(Serialize, Deserialize)] + /// #[native_model(id=1, version=1)] + /// #[native_db] + /// struct Animal { + /// #[primary_key] + /// name: String, + /// } + /// + /// #[derive(Serialize, Deserialize)] + /// #[native_model(id=2, version=1)] + /// #[native_db] + /// struct Vegetable { + /// #[primary_key] + /// name: String, + /// } + /// + /// fn main() -> Result<(), db_type::Error> { + /// let mut builder = DatabaseBuilder::new(); + /// builder.define::()?; + /// builder.define::() + /// } + /// ``` + pub fn define(&mut self) -> Result<()> { + let mut new_model_builder = ModelBuilder { + model: T::native_db_model(), + native_model_options: NativeModelOptions::default(), + }; + + new_model_builder.native_model_options.native_model_id = T::native_model_id(); + new_model_builder.native_model_options.native_model_version = T::native_model_version(); + 
+ // Set native model legacy + for model in self.models_builder.values_mut() { + if model.native_model_options.native_model_version + > new_model_builder.native_model_options.native_model_version + { + model.native_model_options.native_model_legacy = false; + new_model_builder.native_model_options.native_model_legacy = true; + } else { + model.native_model_options.native_model_legacy = true; + new_model_builder.native_model_options.native_model_legacy = false; + } + + // Panic if native model version are the same + if model.native_model_options.native_model_id + == new_model_builder.native_model_options.native_model_id + && model.native_model_options.native_model_version + == new_model_builder.native_model_options.native_model_version + { + panic!( + "The table {} has the same native model version as the table {} and it's not allowed", + model.model.primary_key.unique_table_name, + new_model_builder.model.primary_key.unique_table_name, + ); + } + } + + self.models_builder.insert( + new_model_builder + .model + .primary_key + .unique_table_name + .clone(), + new_model_builder, + ); + + // for secondary_key in model.secondary_keys { + // model_builder.secondary_tables.insert( + // secondary_key.clone(), + // redb::TableDefinition::new(&secondary_key.table_name).into(), + // ); + // } + // self.primary_table_definitions + // .insert(model.primary_key.table_name, primary_table_definition); + + Ok(()) + } +} + +pub(crate) struct ModelBuilder { + pub(crate) model: Model, + pub(crate) native_model_options: NativeModelOptions, } diff --git a/src/common.rs b/src/common.rs deleted file mode 100644 index ce720be6..00000000 --- a/src/common.rs +++ /dev/null @@ -1,11 +0,0 @@ -use crate::SDBItem; - -pub(crate) fn unwrap_item(item: Option>) -> Option { - if let Some(item) = item { - let item = item.value(); - let item = T::struct_db_bincode_decode_from_slice(item); - Some(item) - } else { - None - } -} diff --git a/src/database.rs b/src/database.rs new file mode 100644 index 
00000000..fc979f62 --- /dev/null +++ b/src/database.rs @@ -0,0 +1,165 @@ +use crate::builder::ModelBuilder; +use crate::db_type::Result; +use crate::stats::{Stats, StatsTable}; +use crate::table_definition::PrimaryTableDefinition; +use crate::transaction::internal::r_transaction::InternalRTransaction; +use crate::transaction::internal::rw_transaction::InternalRwTransaction; +use crate::transaction::RTransaction; +use crate::transaction::RwTransaction; +use crate::watch; +use crate::watch::query::{InternalWatch, Watch}; +use redb::TableHandle; +use std::cell::RefCell; +use std::collections::HashMap; +use std::sync::atomic::AtomicU64; +use std::sync::{Arc, RwLock}; +use std::u64; + +/// The [Database] is the main entry point to interact with the database. +/// +/// # Example +/// ```rust +/// use native_db::*; +/// +/// fn main() -> Result<(), db_type::Error> { +/// let builder = DatabaseBuilder::new(); +/// // Define models ... +/// let db = builder.create_in_memory()?; +/// // Open transactions +/// // Watch data +/// // Create snapshots +/// // etc... 
+/// Ok(()) +/// } +pub struct Database<'a> { + pub(crate) instance: redb::Database, + pub(crate) primary_table_definitions: HashMap>, + pub(crate) watchers: Arc>, + pub(crate) watchers_counter_id: AtomicU64, +} + +impl<'a> Database<'a> { + pub(crate) fn seed_model(&mut self, model_builder: &'a ModelBuilder) { + let main_table_definition = + redb::TableDefinition::new(model_builder.model.primary_key.unique_table_name.as_str()); + let mut primary_table_definition: PrimaryTableDefinition = + (model_builder, main_table_definition).into(); + + for secondary_key in model_builder.model.secondary_keys.iter() { + primary_table_definition.secondary_tables.insert( + secondary_key.clone(), + redb::TableDefinition::new(secondary_key.unique_table_name.as_str()).into(), + ); + } + + self.primary_table_definitions.insert( + model_builder.model.primary_key.unique_table_name.clone(), + primary_table_definition, + ); + } + + pub fn redb_stats(&self) -> Result { + use redb::ReadableTable; + let rx = self.instance.begin_read()?; + let mut stats_primary_tables = vec![]; + for primary_table in self.primary_table_definitions.values() { + let result_table_open = rx.open_table(primary_table.redb.clone()); + let stats_table = match result_table_open { + Err(redb::TableError::TableDoesNotExist(_)) => StatsTable { + name: primary_table.redb.name().to_string(), + n_entries: None, + }, + Ok(table_open) => { + let num_raw = table_open.len()?; + StatsTable { + name: primary_table.redb.name().to_string(), + n_entries: Some(num_raw), + } + } + Err(err) => { + return Err(err.into()); + } + }; + stats_primary_tables.push(stats_table); + } + let mut stats_secondary_tables = vec![]; + for primary_table in self.primary_table_definitions.values() { + for secondary_table in primary_table.secondary_tables.values() { + let result_table_open = rx.open_table(secondary_table.redb.clone()); + let stats_table = match result_table_open { + Err(redb::TableError::TableDoesNotExist(_)) => StatsTable { + name: 
secondary_table.redb.name().to_string(), + n_entries: None, + }, + Ok(table_open) => { + let num_raw = table_open.len()?; + StatsTable { + name: secondary_table.redb.name().to_string(), + n_entries: Some(num_raw), + } + } + Err(err) => { + return Err(err.into()); + } + }; + stats_secondary_tables.push(stats_table); + } + } + stats_primary_tables.sort_by(|a, b| a.name.cmp(&b.name)); + stats_secondary_tables.sort_by(|a, b| a.name.cmp(&b.name)); + Ok(Stats { + primary_tables: stats_primary_tables, + secondary_tables: stats_secondary_tables, + }) + } +} + +impl Database<'_> { + /// Creates a new read-write transaction. + pub fn rw_transaction(&self) -> Result { + let rw = self.instance.begin_write()?; + let write_txn = RwTransaction { + watcher: &self.watchers, + batch: RefCell::new(watch::Batch::new()), + internal: InternalRwTransaction { + redb_transaction: rw, + primary_table_definitions: &self.primary_table_definitions, + }, + }; + Ok(write_txn) + } + + /// Creates a new read-only transaction. + pub fn r_transaction(&self) -> Result { + let txn = self.instance.begin_read()?; + let read_txn = RTransaction { + internal: InternalRTransaction { + redb_transaction: txn, + table_definitions: &self.primary_table_definitions, + }, + }; + Ok(read_txn) + } +} + +impl Database<'_> { + /// Watch queries. + pub fn watch(&self) -> Watch { + Watch { + internal: InternalWatch { + watchers: &self.watchers, + watchers_counter_id: &self.watchers_counter_id, + }, + } + } + + /// Unwatch the given `id`. + /// You can get the `id` from the return value of [`primary_watch`](#method.primary_watch). + /// If the `id` is not valid anymore, this function will do nothing. + /// If the `id` is valid, the corresponding watcher will be removed. 
+ pub fn unwatch(&self, id: u64) -> Result<()> { + let mut watchers = self.watchers.write().unwrap(); + watchers.remove_sender(id); + Ok(()) + } +} diff --git a/src/db.rs b/src/db.rs deleted file mode 100644 index c906b684..00000000 --- a/src/db.rs +++ /dev/null @@ -1,465 +0,0 @@ -use crate::builder::Builder; -use crate::stats::{Stats, StatsTable}; -use crate::table_definition::PrimaryTableDefinition; -use crate::watch::MpscReceiver; -use crate::{watch, ReadableTable}; -use crate::{Error, KeyDefinition, ReadOnlyTransaction, Result, SDBItem, Transaction}; -use redb::TableHandle; -use std::cell::RefCell; -use std::collections::HashMap; -use std::fmt::Debug; -use std::path::Path; -use std::sync::atomic::AtomicU64; -use std::sync::{Arc, Mutex, RwLock}; -use std::u64; - -/// The `Db` struct represents a database instance. It allows add **schema**, create **transactions** and **watcher**. -pub struct Db { - pub(crate) instance: redb::Database, - pub(crate) primary_table_definitions: HashMap<&'static str, PrimaryTableDefinition>, - pub(crate) watchers: Arc>, - pub(crate) watchers_counter_id: AtomicU64, -} - -impl Db { - /// Creates a new [Db] instance using the given path. - /// - /// Use [redb::Builder.create(...)](https://docs.rs/redb/latest/redb/struct.Builder.html#method.create) - pub fn create(path: impl AsRef) -> Result { - Builder::new().create(path) - } - - /// Creates a new [Db] instance using a temporary directory with the given path. - /// - /// Example: `Db::create_tmp('project/my_db')` will create the db to `/tmp/project/my_db`. - /// - /// Use [redb::Builder.create(...)](https://docs.rs/redb/latest/redb/struct.Builder.html#method.create) - pub fn create_tmp(path: impl AsRef) -> Result { - Builder::new().create_tmp(path) - } - - /// Opens an existing [Db] instance using the given path. - pub fn open(path: impl AsRef) -> Result { - Builder::new().open(path) - } - - /// Opens an existing [Db] instance using a temporary directory with the given path. 
- pub fn open_tmp(path: impl AsRef) -> Result { - Builder::new().open_tmp(path) - } - - /// Defines a table using the given schema. - /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] - /// #[struct_db(pk = p_key)] - /// struct Data(u32); - /// impl Data {pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()}} - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db_as").unwrap(); - /// // Initialize the table - /// db.define::(); - /// } - pub fn define(&mut self) -> Result<()> { - let schema = T::struct_db_schema(); - let main_table_name = schema.table_name; - let main_table_definition = redb::TableDefinition::new(main_table_name); - let mut primary_table_definition: PrimaryTableDefinition = - (schema.clone(), main_table_definition).into(); - - #[cfg(feature = "native_model")] - { - primary_table_definition.native_model_id = T::native_model_id(); - primary_table_definition.native_model_version = T::native_model_version(); - - // Set native model legacy - for other_primary_table_definition in self.primary_table_definitions.values_mut() { - if other_primary_table_definition.native_model_version - > primary_table_definition.native_model_version - { - other_primary_table_definition.native_model_legacy = false; - primary_table_definition.native_model_legacy = true; - } else { - other_primary_table_definition.native_model_legacy = true; - primary_table_definition.native_model_legacy = false; - } - - // Panic if native model version are the same - if other_primary_table_definition.native_model_version - == primary_table_definition.native_model_version - { - panic!( - "The table {} has the same native model version as the table {} and it's not allowed", - other_primary_table_definition.redb.name(), - primary_table_definition.redb.name() - ); - } - } - } - - for secondary_table_name in schema.secondary_tables_name { - 
primary_table_definition.secondary_tables.insert( - secondary_table_name, - redb::TableDefinition::new(secondary_table_name).into(), - ); - } - self.primary_table_definitions - .insert(main_table_name, primary_table_definition); - - Ok(()) - } - - #[cfg(feature = "native_model")] - pub fn migrate(&mut self) -> Result<()> { - use redb::ReadableTable; - - // Panic if T is legacy - let new_table_definition = self - .primary_table_definitions - .get(T::struct_db_schema().table_name) - .unwrap(); - if new_table_definition.native_model_legacy { - // TODO: test - panic!( - "The table {} is legacy, you can't migrate it", - T::struct_db_schema().table_name - ); - } - - // Check which table are the data - let mut old_table_definition = None; - for other_primary_table_definition in self.primary_table_definitions.values() { - let rx = self.instance.begin_read()?; - - // check if table exists, if the table does not exist continue - if rx - .list_tables()? - .find(|table| table.name() == other_primary_table_definition.redb.name()) - .is_none() - { - continue; - } - - let table = rx.open_table(other_primary_table_definition.redb.clone())?; - let len = table.len()?; - if len > 0 && old_table_definition.is_some() { - panic!( - "Impossible to migrate the table {} because the table {} has data", - T::struct_db_schema().table_name, - other_primary_table_definition.redb.name() - ); - } else if table.len()? 
> 0 { - old_table_definition = Some(other_primary_table_definition); - } - } - - // Check there data in the old table - if old_table_definition.is_none() { - // Nothing to migrate - return Ok(()); - } - - let old_table_definition = old_table_definition.unwrap(); - - // If the old table is the same as the new table, nothing to migrate - if old_table_definition.redb.name() == T::struct_db_schema().table_name { - // Nothing to migrate - return Ok(()); - } - - let wx = self.transaction()?; - { - let mut tables = wx.tables(); - let old_data = - tables.internal_primary_drain(&wx, old_table_definition.schema.table_name, ..)?; - - for old_data in old_data { - let (decoded_item, _) = native_model::decode::(old_data.0).unwrap(); - tables.insert(&wx, decoded_item)?; - } - } - wx.commit()?; - - Ok(()) - } - - pub fn redb_stats(&self) -> Result { - use redb::ReadableTable; - let rx = self.instance.begin_read()?; - let mut stats_tables = vec![]; - for table in rx.list_tables()? { - let table_definition: redb::TableDefinition<'_, &'static [u8], &'static [u8]> = - redb::TableDefinition::new(&table.name()); - let table_open = rx.open_table(table_definition)?; - let num_raw = table_open.len()?; - stats_tables.push(StatsTable { - name: table.name().to_string(), - num_raw: num_raw as usize, - }); - } - Ok(Stats { stats_tables }) - } -} - -impl Db { - /// Creates a new read-write transaction. 
- /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] - /// #[struct_db(pk = p_key)] - /// struct Data(u32); - /// impl Data {pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()}} - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db_t").unwrap(); - /// db.define::(); - /// - /// // Use transaction to insert a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut data = Data(42); - /// let mut tables = txn.tables(); - /// tables.insert(&txn, data).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Use transaction to update a data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// let (new_data, old_data) = { - /// let old_data = tables.primary_get::(&txn, &42_u32.to_be_bytes()).unwrap().unwrap(); - /// let mut new_data = old_data.clone(); - /// new_data.0 = 43; - /// (new_data, old_data) - /// }; - /// tables.update(&txn, old_data, new_data).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Use transaction to delete a data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// let data = tables.primary_get::(&txn, &43_u32.to_be_bytes()).unwrap().unwrap(); - /// tables.remove(&txn, data).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// } - pub fn transaction(&self) -> Result { - let txn = self.instance.begin_write()?; - let write_txn = Transaction { - table_definitions: &self.primary_table_definitions, - txn, - watcher: &self.watchers, - batch: RefCell::new(watch::Batch::new()), - }; - Ok(write_txn) - } - - /// Creates a new read-only transaction. 
- /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] - /// #[struct_db(pk = p_key)] - /// struct Data(u32); - /// impl Data {pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()}} - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db_rt").unwrap(); - /// db.define::(); - /// - /// // Insert a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, Data(42)).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// let txn_read = db.read_transaction().unwrap(); - /// let mut tables = txn_read.tables(); - /// let len = tables.len::(&txn_read).unwrap(); - /// assert_eq!(len, 1); - /// } - pub fn read_transaction(&self) -> Result { - let txn = self.instance.begin_read()?; - let read_txn = ReadOnlyTransaction { - table_definitions: &self.primary_table_definitions, - txn, - }; - Ok(read_txn) - } -} - -impl Db { - fn generate_watcher_id(&self) -> Result { - let value = self - .watchers_counter_id - .fetch_add(1, std::sync::atomic::Ordering::SeqCst); - if value == u64::MAX { - Err(Error::MaxWatcherReached.into()) - } else { - Ok(value) - } - } - - fn watch_generic( - &self, - table_filter: watch::TableFilter, - ) -> Result<(MpscReceiver, u64)> { - #[cfg(not(feature = "tokio"))] - let (event_sender, event_receiver) = std::sync::mpsc::channel(); - #[cfg(feature = "tokio")] - let (event_sender, event_receiver) = tokio::sync::mpsc::unbounded_channel(); - let event_sender = Arc::new(Mutex::new(event_sender)); - let id = self.generate_watcher_id()?; - let mut watchers = self.watchers.write().unwrap(); - watchers.add_sender(id, &table_filter, Arc::clone(&event_sender)); - drop(watchers); - Ok((event_receiver, id)) - } - - /// Watches for changes in the specified table for the given primary key. 
- /// If the argument `key` is `None` you will receive all the events from the table. - /// Otherwise you will receive only the events for the given primary key. - /// - /// Supported channels to to receive changes: - /// - [`std::sync::mpsc::Receiver`](https://doc.rust-lang.org/std/sync/mpsc/struct.Receiver.html) by default - /// - [`tokio::sync::mpsc::UnboundedReceiver`](https://docs.rs/tokio/latest/tokio/sync/mpsc/struct.UnboundedReceiver.html) with the feature (`tokio`). - /// - /// To unregister the watcher you need to call [`unwatch`](Db::unwatch) - /// with the returned `id`. - /// - /// Get data from the event, use `inner()` method on: - /// - [`watch::Insert::inner`](watch::Insert::inner) - /// - [`watch::Update::inner_new`](watch::Update::inner_new) to get the updated data - /// - [`watch::Update::inner_old`](watch::Update::inner_old) to get the old data - /// - [`watch::Delete::inner`](watch::Delete::inner) - /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] - /// #[struct_db(pk = p_key)] - /// struct Data(u32); - /// impl Data {pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()}} - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db").unwrap(); - /// db.define::(); - /// - /// // None you will receive all the events from Data. 
- /// let (event_receiver, _id) = db.primary_watch::(None).unwrap(); - /// - /// // Add a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, Data(42)).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Wait for the event - /// for _ in 0..1 { - /// // With the feature "tokio" you can use async/await pattern - /// let event = event_receiver.recv().unwrap(); - /// if let watch::Event::Insert(insert) = event { - /// let data = insert.inner::(); - /// assert_eq!(data, Data(42)); - /// } - /// } - /// } - pub fn primary_watch( - &self, - key: Option<&[u8]>, - ) -> Result<(MpscReceiver, u64)> { - let table_name = T::struct_db_schema().table_name; - let table_filter = watch::TableFilter::new_primary(table_name.as_bytes(), key); - self.watch_generic(table_filter) - } - - /// Watches for changes in the specified table for the given prefix. - /// You will receive all the events for the given prefix. - /// - /// To unregister the watcher you need to call [`unwatch`](Db::unwatch) - /// with the returned `id`. - /// - /// # Example - /// - Similar to [`primary_watch`](#method.primary_watch) but with a prefix. - pub fn primary_watch_start_with( - &self, - key_prefix: &[u8], - ) -> Result<(MpscReceiver, u64)> { - let table_name = T::struct_db_schema().table_name; - let table_filter = - watch::TableFilter::new_primary_start_with(table_name.as_bytes(), key_prefix); - self.watch_generic(table_filter) - } - - /// Watches for changes in the specified table for the given secondary key. - /// If the argument `key` is `None` you will receive all the events from the table. - /// Otherwise you will receive only the events for the given secondary key. - /// - /// To unregister the watcher you need to call [`unwatch`](Db::unwatch) - /// with the returned `id`. - /// - /// # Example - /// - Similar to [`primary_watch`](#method.primary_watch) but with a secondary key. 
- pub fn secondary_watch( - &self, - key_def: impl KeyDefinition, - key: Option<&[u8]>, - ) -> Result<(MpscReceiver, u64)> { - let table_name = T::struct_db_schema().table_name; - let table_filter = watch::TableFilter::new_secondary(table_name.as_bytes(), key_def, key); - self.watch_generic(table_filter) - } - - /// Watches for changes in the specified table for the given prefix. - /// You will receive all the events for the given prefix. - /// - /// To unregister the watcher you need to call [`unwatch`](Db::unwatch) - /// with the returned `id`. - /// - /// # Example - /// - Similar to [`primary_watch`](#method.primary_watch) but with a secondary key and a prefix. - pub fn secondary_watch_start_with( - &self, - key_def: impl KeyDefinition, - key_prefix: &[u8], - ) -> Result<(MpscReceiver, u64)> { - let table_name = T::struct_db_schema().table_name; - let table_filter = watch::TableFilter::new_secondary_start_with( - table_name.as_bytes(), - key_def, - key_prefix, - ); - self.watch_generic(table_filter) - } - - /// Unwatch the given `id`. - /// You can get the `id` from the return value of [`primary_watch`](#method.primary_watch). - /// If the `id` is not valid anymore, this function will do nothing. - /// If the `id` is valid, the corresponding watcher will be removed. 
- pub fn unwatch(&self, id: u64) -> Result<()> { - let mut watchers = self.watchers.write().unwrap(); - watchers.remove_sender(id); - Ok(()) - } -} diff --git a/src/db_type/error.rs b/src/db_type/error.rs new file mode 100644 index 00000000..5f4d42e8 --- /dev/null +++ b/src/db_type/error.rs @@ -0,0 +1,60 @@ +use crate::{db_type, watch}; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum Error { + #[error("Redb error")] + Redb(#[from] redb::Error), + + #[error("Redb database error")] + RedbDatabaseError(#[from] redb::DatabaseError), + + #[error("Redb transaction error")] + RedbTransactionError(#[from] redb::TransactionError), + + #[error("Redb storage error")] + RedbStorageError(#[from] redb::StorageError), + + #[error("Redb table error")] + RedbTableError(#[from] redb::TableError), + + #[error("Redb commit error")] + RedbCommitError(#[from] redb::CommitError), + + #[error("IO error")] + Io(#[from] std::io::Error), + + #[error("Table definition not found {table}")] + TableDefinitionNotFound { table: String }, + + #[error("Secondary key definition not found {table} {key}")] + SecondaryKeyDefinitionNotFound { table: String, key: String }, + + #[error("Secondary key constraint mismatch {table} {key} got: {got:?}")] + SecondaryKeyConstraintMismatch { + table: String, + key: String, + got: db_type::DatabaseSecondaryKeyOptions, + }, + + #[error("The secondary key {key_name} is not unique ")] + NotUniqueSecondaryKey { key_name: String }, + + #[error("Key not found {key:?}")] + KeyNotFound { key: Vec }, + + #[error("Primary key associated with the secondary key not found")] + PrimaryKeyNotFound, + + #[error("Duplicate key for \"{key_name}\"")] + DuplicateKey { key_name: String }, + + #[error("Watch event error")] + WatchEventError(#[from] watch::WatchEventError), + + #[error("Max watcher reached (should be impossible)")] + MaxWatcherReached, + + #[error("You can not migrate the table {0} because it is a legacy model")] + MigrateLegacyModel(String), +} diff --git 
a/src/db_type/input.rs b/src/db_type/input.rs new file mode 100644 index 00000000..f629633c --- /dev/null +++ b/src/db_type/input.rs @@ -0,0 +1,67 @@ +use crate::db_type::{ + composite_key, DatabaseInnerKeyValue, DatabaseKeyDefinition, DatabaseKeyValue, + DatabaseSecondaryKeyOptions, Error, Result, +}; + +#[derive(Debug)] +pub struct DatabaseInput { + pub(crate) primary_key: DatabaseInnerKeyValue, + pub(crate) secondary_keys: std::collections::HashMap< + DatabaseKeyDefinition, + DatabaseKeyValue, + >, + pub(crate) value: Vec, +} + +impl DatabaseInput { + pub(crate) fn secondary_key_value( + &self, + secondary_key_def: &DatabaseKeyDefinition, + ) -> Result { + let secondary_key = self.secondary_keys.get(secondary_key_def).ok_or( + Error::SecondaryKeyDefinitionNotFound { + table: "".to_string(), + key: secondary_key_def.unique_table_name.clone(), + }, + )?; + let out = if !secondary_key_def.options.unique { + match secondary_key { + DatabaseKeyValue::Default(value) => { + DatabaseKeyValue::Default(composite_key(value, &self.primary_key)) + } + DatabaseKeyValue::Optional(value) => { + let value = value + .as_ref() + .map(|value| composite_key(value, &self.primary_key)); + DatabaseKeyValue::Optional(value) + } + } + } else { + secondary_key.clone() + }; + Ok(out) + } +} + +pub trait Input: Sized + native_model::Model { + fn native_db_model() -> crate::Model; + + fn native_db_primary_key(&self) -> DatabaseInnerKeyValue; + + fn native_db_secondary_keys( + &self, + ) -> std::collections::HashMap< + DatabaseKeyDefinition, + DatabaseKeyValue, + >; + fn native_db_bincode_encode_to_vec(&self) -> Vec; + fn native_db_bincode_decode_from_slice(slice: &[u8]) -> Self; + + fn to_item(&self) -> DatabaseInput { + DatabaseInput { + primary_key: self.native_db_primary_key(), + secondary_keys: self.native_db_secondary_keys(), + value: self.native_db_bincode_encode_to_vec(), + } + } +} diff --git a/src/db_type/key/inner_key_value.rs b/src/db_type/key/inner_key_value.rs new file mode 
100644 index 00000000..b46f5b1f --- /dev/null +++ b/src/db_type/key/inner_key_value.rs @@ -0,0 +1,377 @@ +use redb::{RedbKey, RedbValue, TypeName}; +use std::fmt::Debug; +use std::ops::{Bound, Range, RangeBounds, RangeFrom, RangeInclusive, RangeTo, RangeToInclusive}; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct DatabaseInnerKeyValue(Vec); + +impl DatabaseInnerKeyValue { + fn new(data: Vec) -> Self { + Self(data) + } + + pub(crate) fn extend(&mut self, data: &DatabaseInnerKeyValue) { + self.0.extend(data.0.iter()); + } + + pub(crate) fn as_slice(&self) -> &[u8] { + self.0.as_slice() + } +} + +pub trait InnerKeyValue: Debug { + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue; +} + +// Implement for char +impl InnerKeyValue for char { + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue { + DatabaseInnerKeyValue::new(u32::from(*self).to_be_bytes().to_vec()) + } +} + +// Implement for String +impl InnerKeyValue for String { + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue { + self.as_str().database_inner_key_value() + } +} + +// Implement for &str +impl InnerKeyValue for &str { + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue { + DatabaseInnerKeyValue::new(self.as_bytes().to_vec()) + } +} + +impl InnerKeyValue for DatabaseInnerKeyValue { + // TODO: Bad because that cause a copy of the data when we pass a DatabaseInnerKeyValue to a function + // which has a impl InnerKeyValue parameter + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue { + self.clone() + } +} + +// Implement for Slice +impl InnerKeyValue for &[T] +where + T: InnerKeyValue, +{ + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue { + let mut data = Vec::new(); + for item in self.iter().as_slice() { + data.extend(item.database_inner_key_value().0); + } + DatabaseInnerKeyValue::new(data) + } +} + +// Implement for tuples +impl InnerKeyValue for () { + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue { + 
DatabaseInnerKeyValue::new(Vec::new()) + } +} + +// Macro for tuples +macro_rules! impl_inner_key_value_for_tuple { + ( $($t:ident, $i:tt),+ | $t_last:ident, $i_last:tt ) => { + impl<$($t: InnerKeyValue,)+ $t_last: InnerKeyValue> InnerKeyValue for ($($t,)+ $t_last) { + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue { + let mut data = Vec::new(); + $( + data.extend(self.$i.database_inner_key_value().0); + )+ + data.extend(self.$i_last.database_inner_key_value().0); + DatabaseInnerKeyValue::new(data) + } + } + } +} + +// Implementations for tuples of different sizes +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0 | + T1, 1 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1 | + T2, 2 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1, + T2, 2 | T3, 3 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1, + T2, 2, T3, 3 | + T4, 4 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1, + T2, 2, T3, 3, + T4, 4 | T5, 5 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1, + T2, 2, T3, 3, + T4, 4, T5, 5 + | T6, 6 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1, + T2, 2, T3, 3, + T4, 4, T5, 5, + T6, 6 | T7, 7 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1, + T2, 2, T3, 3, + T4, 4, T5, 5, + T6, 6, T7, 7 | + T8, 8 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1, + T2, 2, T3, 3, + T4, 4, T5, 5, + T6, 6, T7, 7, + T8, 8 | T9, 9 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1, + T2, 2, T3, 3, + T4, 4, T5, 5, + T6, 6, T7, 7, + T8, 8, T9, 9 | + T10, 10 +); +#[rustfmt::skip] +impl_inner_key_value_for_tuple!( + T0, 0, T1, 1, + T2, 2, T3, 3, + T4, 4, T5, 5, + T6, 6, T7, 7, + T8, 8, T9, 9, + T10, 10 | T11, 11 +); + +// Implement InnerKeyValue for Vec where T: InnerKeyValue +impl InnerKeyValue for Vec +where + T: InnerKeyValue, +{ + fn database_inner_key_value(&self) -> 
DatabaseInnerKeyValue { + let mut data = Vec::new(); + for item in self { + data.extend(item.database_inner_key_value().0); + } + DatabaseInnerKeyValue::new(data) + } +} + +// Implement InnerKeyValue for Option where T: InnerKeyValue +impl InnerKeyValue for Option +where + T: InnerKeyValue, +{ + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue { + match self { + Some(value) => value.database_inner_key_value(), + None => DatabaseInnerKeyValue::new(Vec::new()), + } + } +} + +// Macro for implementing InnerKeyValue for u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, f32, f64 +macro_rules! impl_inner_key_value_for_primitive { + ($type:ty) => { + impl InnerKeyValue for $type { + fn database_inner_key_value(&self) -> DatabaseInnerKeyValue { + DatabaseInnerKeyValue::new(self.to_be_bytes().to_vec()) + } + } + }; +} + +impl_inner_key_value_for_primitive!(u8); +impl_inner_key_value_for_primitive!(u16); +impl_inner_key_value_for_primitive!(u32); +impl_inner_key_value_for_primitive!(u64); +impl_inner_key_value_for_primitive!(u128); +impl_inner_key_value_for_primitive!(i8); +impl_inner_key_value_for_primitive!(i16); +impl_inner_key_value_for_primitive!(i32); +impl_inner_key_value_for_primitive!(i64); +impl_inner_key_value_for_primitive!(i128); +impl_inner_key_value_for_primitive!(f32); +impl_inner_key_value_for_primitive!(f64); + +impl RedbValue for DatabaseInnerKeyValue { + type SelfType<'a> = DatabaseInnerKeyValue; + type AsBytes<'a> = &'a [u8] where Self: 'a; + + fn fixed_width() -> Option { + None + } + + fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a> + where + Self: 'a, + { + data.database_inner_key_value() + } + + fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> + where + Self: 'a, + Self: 'b, + { + value.0.as_slice() + } + + fn type_name() -> TypeName { + TypeName::new("DatabaseInnerKeyValue") + } +} + +impl RedbKey for DatabaseInnerKeyValue { + fn compare(data1: &[u8], data2: &[u8]) -> std::cmp::Ordering { + 
data1.cmp(&data2) + } +} + +pub enum DatabaseInnerKeyValueRange { + Range(Range), + RangeInclusive(RangeInclusive), + RangeFrom(RangeFrom), + RangeTo(RangeTo), + RangeToInclusive(RangeToInclusive), + RangeFull, +} + +impl DatabaseInnerKeyValueRange { + pub fn new(bounds: impl RangeBounds) -> DatabaseInnerKeyValueRange + where + T: InnerKeyValue, + { + match (bounds.start_bound(), bounds.end_bound()) { + (Bound::Included(start), Bound::Included(end)) => { + DatabaseInnerKeyValueRange::RangeInclusive( + start.database_inner_key_value()..=end.database_inner_key_value(), + ) + } + (Bound::Included(start), Bound::Excluded(end)) => DatabaseInnerKeyValueRange::Range( + start.database_inner_key_value()..end.database_inner_key_value(), + ), + (Bound::Included(start), Bound::Unbounded) => { + DatabaseInnerKeyValueRange::RangeFrom(RangeFrom { + start: start.database_inner_key_value(), + }) + } + (Bound::Excluded(start), Bound::Included(end)) => { + DatabaseInnerKeyValueRange::RangeInclusive( + start.database_inner_key_value()..=end.database_inner_key_value(), + ) + } + (Bound::Excluded(start), Bound::Excluded(end)) => DatabaseInnerKeyValueRange::Range( + start.database_inner_key_value()..end.database_inner_key_value(), + ), + (Bound::Excluded(start), Bound::Unbounded) => { + DatabaseInnerKeyValueRange::RangeFrom(RangeFrom { + start: start.database_inner_key_value(), + }) + } + (Bound::Unbounded, Bound::Included(end)) => { + DatabaseInnerKeyValueRange::RangeTo(RangeTo { + end: { end.database_inner_key_value() }, + }) + } + (Bound::Unbounded, Bound::Excluded(end)) => { + DatabaseInnerKeyValueRange::RangeTo(RangeTo { + end: end.database_inner_key_value(), + }) + } + (Bound::Unbounded, Bound::Unbounded) => DatabaseInnerKeyValueRange::RangeFull, + } + } +} + +impl RangeBounds for DatabaseInnerKeyValueRange { + fn start_bound(&self) -> Bound<&DatabaseInnerKeyValue> { + match self { + DatabaseInnerKeyValueRange::Range(range) => range.start_bound(), + 
DatabaseInnerKeyValueRange::RangeInclusive(range) => range.start_bound(), + DatabaseInnerKeyValueRange::RangeFrom(range) => range.start_bound(), + DatabaseInnerKeyValueRange::RangeTo(range) => range.start_bound(), + DatabaseInnerKeyValueRange::RangeToInclusive(range) => range.start_bound(), + DatabaseInnerKeyValueRange::RangeFull => Bound::Unbounded, + } + } + + fn end_bound(&self) -> Bound<&DatabaseInnerKeyValue> { + match self { + DatabaseInnerKeyValueRange::Range(range) => range.end_bound(), + DatabaseInnerKeyValueRange::RangeInclusive(range) => range.end_bound(), + DatabaseInnerKeyValueRange::RangeFrom(range) => range.end_bound(), + DatabaseInnerKeyValueRange::RangeTo(range) => range.end_bound(), + DatabaseInnerKeyValueRange::RangeToInclusive(range) => range.end_bound(), + DatabaseInnerKeyValueRange::RangeFull => Bound::Unbounded, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::ops::RangeBounds; + + fn range>(range: R) -> DatabaseInnerKeyValueRange { + let range = DatabaseInnerKeyValueRange::new(range); + range + } + + #[test] + fn test_range() { + use redb::{ReadableTable, TableDefinition}; + + const TABLE: TableDefinition = TableDefinition::new("my_data"); + + let backend = redb::backends::InMemoryBackend::new(); + let db = redb::Database::builder() + .create_with_backend(backend) + .unwrap(); + let write_txn = db.begin_write().unwrap(); + { + let mut table = write_txn.open_table(TABLE).unwrap(); + table.insert(0u32.database_inner_key_value(), &123).unwrap(); + } + write_txn.commit().unwrap(); + + let read_txn = db.begin_read().unwrap(); + let table = read_txn.open_table(TABLE).unwrap(); + assert_eq!( + table + .get(0u32.database_inner_key_value()) + .unwrap() + .unwrap() + .value(), + 123 + ); + + let range = range(0..2); + let iter = table.range::(range).unwrap(); + let result: Vec<_> = iter.collect(); + assert_eq!(result.len(), 1); + } +} diff --git a/src/db_type/key/key_definition.rs b/src/db_type/key/key_definition.rs new file mode 
100644 index 00000000..3bd89b15 --- /dev/null +++ b/src/db_type/key/key_definition.rs @@ -0,0 +1,73 @@ +use crate::db_type::DatabaseInnerKeyValue; +use std::hash::Hash; + +pub trait KeyDefinition { + fn database_key(&self) -> DatabaseKeyDefinition; +} + +#[derive(Default, Clone, Debug)] +pub struct DatabaseKeyDefinition { + pub(crate) unique_table_name: String, + pub(crate) options: O, +} + +impl KeyDefinition for DatabaseKeyDefinition { + fn database_key(&self) -> DatabaseKeyDefinition { + self.clone() + } +} + +impl DatabaseKeyDefinition { + pub fn new(model_id: u32, model_version: u32, name: &'static str, options: O) -> Self { + let table_name = format!("{}_{}_{}", model_id, model_version, name); + Self { + options, + unique_table_name: table_name, + } + } + + pub fn options(&self) -> &O { + &self.options + } +} + +impl From<&'static str> for DatabaseKeyDefinition<()> { + fn from(name: &'static str) -> Self { + Self::new(0, 0, name, ()) + } +} + +impl From<&'static str> for DatabaseKeyDefinition { + fn from(name: &'static str) -> Self { + Self::new(0, 0, name, DatabaseSecondaryKeyOptions::default()) + } +} + +impl PartialEq for DatabaseKeyDefinition { + fn eq(&self, other: &Self) -> bool { + self.unique_table_name == other.unique_table_name + } +} + +impl Eq for DatabaseKeyDefinition {} + +impl Hash for DatabaseKeyDefinition { + fn hash(&self, state: &mut H) { + self.unique_table_name.hash(state); + } +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct DatabaseSecondaryKeyOptions { + pub unique: bool, + pub optional: bool, +} + +pub fn composite_key( + secondary_key: &DatabaseInnerKeyValue, + primary_key: &DatabaseInnerKeyValue, +) -> DatabaseInnerKeyValue { + let mut secondary_key = secondary_key.clone(); + secondary_key.extend(primary_key); + secondary_key +} diff --git a/src/db_type/key/key_value.rs b/src/db_type/key/key_value.rs new file mode 100644 index 00000000..ece7242a --- /dev/null +++ b/src/db_type/key/key_value.rs @@ -0,0 +1,7 @@ +use 
crate::db_type::DatabaseInnerKeyValue; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum DatabaseKeyValue { + Default(DatabaseInnerKeyValue), + Optional(Option), +} diff --git a/src/db_type/key/mod.rs b/src/db_type/key/mod.rs new file mode 100644 index 00000000..163eee9c --- /dev/null +++ b/src/db_type/key/mod.rs @@ -0,0 +1,7 @@ +mod inner_key_value; +mod key_definition; +mod key_value; + +pub use inner_key_value::*; +pub use key_definition::*; +pub use key_value::*; diff --git a/src/db_type/mod.rs b/src/db_type/mod.rs new file mode 100644 index 00000000..aef13625 --- /dev/null +++ b/src/db_type/mod.rs @@ -0,0 +1,11 @@ +mod error; +mod input; +mod key; +mod output; +mod result; + +pub use error::*; +pub use input::*; +pub use key::*; +pub use output::*; +pub use result::*; diff --git a/src/db_type/output.rs b/src/db_type/output.rs new file mode 100644 index 00000000..4b6aa27c --- /dev/null +++ b/src/db_type/output.rs @@ -0,0 +1,26 @@ +use crate::db_type::Input; + +#[derive(Clone, Debug)] +pub struct DatabaseOutputValue(pub(crate) Vec); + +impl From<&[u8]> for DatabaseOutputValue { + fn from(slice: &[u8]) -> Self { + Self(slice.to_vec()) + } +} + +impl DatabaseOutputValue { + pub fn inner(&self) -> T { + T::native_db_bincode_decode_from_slice(&self.0) + } +} + +pub(crate) fn unwrap_item(item: Option>) -> Option { + if let Some(item) = item { + let item = item.value(); + let item = T::native_db_bincode_decode_from_slice(item); + Some(item) + } else { + None + } +} diff --git a/src/db_type/result.rs b/src/db_type/result.rs new file mode 100644 index 00000000..25c5217f --- /dev/null +++ b/src/db_type/result.rs @@ -0,0 +1,3 @@ +use super::Error; + +pub type Result = std::result::Result; diff --git a/src/item.rs b/src/item.rs deleted file mode 100644 index 868fca1b..00000000 --- a/src/item.rs +++ /dev/null @@ -1,55 +0,0 @@ -#[cfg(not(feature = "native_model"))] -pub trait SDBItem: Sized { - fn struct_db_schema() -> crate::Schema; - fn struct_db_pk(&self) -> Vec; - - // 
Return map of secondary table name and the value of the key - fn struct_db_gks(&self) -> std::collections::HashMap<&'static str, Vec>; - fn struct_db_bincode_encode_to_vec(&self) -> Vec; - fn struct_db_bincode_decode_from_slice(slice: &[u8]) -> Self; - - fn to_item(&self) -> Item { - Item { - primary_key: self.struct_db_pk(), - secondary_keys: self.struct_db_gks(), - value: self.struct_db_bincode_encode_to_vec(), - } - } -} - -#[cfg(feature = "native_model")] -pub trait SDBItem: Sized + native_model::Model { - fn struct_db_schema() -> crate::Schema; - fn struct_db_pk(&self) -> Vec; - fn struct_db_gks(&self) -> std::collections::HashMap<&'static str, Vec>; - fn struct_db_bincode_encode_to_vec(&self) -> Vec; - fn struct_db_bincode_decode_from_slice(slice: &[u8]) -> Self; - - fn to_item(&self) -> Item { - Item { - primary_key: self.struct_db_pk(), - secondary_keys: self.struct_db_gks(), - value: self.struct_db_bincode_encode_to_vec(), - } - } -} - -pub trait KeyDefinition: Sized { - fn secondary_table_name(&self) -> &'static str; -} - -#[derive(Clone, Debug)] -pub(crate) struct BinaryValue(pub(crate) Vec); - -impl BinaryValue { - pub fn inner(&self) -> T { - T::struct_db_bincode_decode_from_slice(&self.0) - } -} - -#[derive(Debug)] -pub struct Item { - pub(crate) primary_key: Vec, - pub(crate) secondary_keys: std::collections::HashMap<&'static str, Vec>, - pub(crate) value: Vec, -} diff --git a/src/iterator/mod.rs b/src/iterator/mod.rs deleted file mode 100644 index ddfacc97..00000000 --- a/src/iterator/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod primary_iterator; -mod primary_iterator_start_with; -mod secondary_iterator; -mod secondary_iterator_start_with; - -pub use primary_iterator::*; -pub use primary_iterator_start_with::*; -pub use secondary_iterator::*; -pub use secondary_iterator_start_with::*; diff --git a/src/iterator/primary_iterator.rs b/src/iterator/primary_iterator.rs deleted file mode 100644 index 70bbb248..00000000 --- a/src/iterator/primary_iterator.rs 
+++ /dev/null @@ -1,31 +0,0 @@ -use crate::common::unwrap_item; -use crate::SDBItem; -use std::iter; -use std::marker::PhantomData; - -/// Provides a way to iterate over the values stored in a database and -/// automatically deserialize them into items of type `T`. -pub struct PrimaryIterator<'a, 'txn, 'db, T: SDBItem> { - pub(crate) range: redb::Range<'a, &'static [u8], &'static [u8]>, - pub(crate) _marker: PhantomData<(&'db (), &'txn (), T)>, -} - -impl<'a, 'txn, 'db, T: SDBItem> iter::Iterator for PrimaryIterator<'a, 'txn, 'db, T> { - type Item = T; - - fn next(&mut self) -> Option { - match self.range.next() { - Some(Ok((_, v))) => unwrap_item(Some(v)), - _ => None, - } - } -} - -impl<'a, 'txn, 'db, T: SDBItem> DoubleEndedIterator for PrimaryIterator<'a, 'txn, 'db, T> { - fn next_back(&mut self) -> Option { - match self.range.next_back() { - Some(Ok((_, v))) => unwrap_item(Some(v)), - _ => None, - } - } -} diff --git a/src/iterator/primary_iterator_start_with.rs b/src/iterator/primary_iterator_start_with.rs deleted file mode 100644 index 5870cf85..00000000 --- a/src/iterator/primary_iterator_start_with.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::common::unwrap_item; -use crate::SDBItem; -use std::marker::PhantomData; - -/// Same as [`PrimaryIterator`](crate::PrimaryIterator) but only returns values which primary key starts with the given prefix. 
-pub struct PrimaryIteratorStartWith<'a, 'txn, 'db, T: SDBItem> { - pub(crate) range: redb::Range<'a, &'static [u8], &'static [u8]>, - pub(crate) start_with: &'a [u8], - pub(crate) _marker: PhantomData<(&'db (), &'txn (), T)>, -} - -impl<'a, 'txn, 'db, T: SDBItem> Iterator for PrimaryIteratorStartWith<'a, 'txn, 'db, T> { - type Item = T; - - fn next(&mut self) -> Option { - match self.range.next() { - Some(Ok((k, v))) => { - let k = k.value(); - if k.starts_with(self.start_with) { - unwrap_item(Some(v)) - } else { - None - } - } - _ => None, - } - } -} - -// TODO: Found a way to implement DoubleEndedIterator for StructDBIteratorStartWith diff --git a/src/iterator/secondary_iterator.rs b/src/iterator/secondary_iterator.rs deleted file mode 100644 index ab0c47db..00000000 --- a/src/iterator/secondary_iterator.rs +++ /dev/null @@ -1,51 +0,0 @@ -use crate::common::unwrap_item; -use crate::SDBItem; -use redb::ReadableTable; -use std::marker::PhantomData; - -/// Same as [`PrimaryIterator`](crate::PrimaryIterator) but only returns values that match the given secondary key. 
-pub struct SecondaryIterator< - 'a, - 'txn, - 'db, - T: SDBItem, - MT: ReadableTable<&'static [u8], &'static [u8]>, -> { - pub(crate) range: redb::Range<'a, &'static [u8], &'static [u8]>, - pub(crate) main_table: &'a MT, - pub(crate) _marker: PhantomData<(&'db (), &'txn (), T)>, -} - -impl<'a, 'txn, 'db, T: SDBItem, MT: ReadableTable<&'static [u8], &'static [u8]>> Iterator - for SecondaryIterator<'a, 'txn, 'db, T, MT> -{ - type Item = T; - - fn next(&mut self) -> Option { - match self.range.next() { - Some(Ok((_, v))) => { - let key: Vec = v.value().into(); - if let Ok(value) = self.main_table.get(&*key) { - unwrap_item(value) - } else { - None - } - } - _ => None, - } - } -} - -impl<'a, 'txn, 'db, T: SDBItem, MT: ReadableTable<&'static [u8], &'static [u8]>> DoubleEndedIterator - for SecondaryIterator<'a, 'txn, 'db, T, MT> -{ - fn next_back(&mut self) -> Option { - match self.range.next_back() { - Some(Ok((_, v))) => { - let key: Vec = v.value().into(); - unwrap_item(self.main_table.get(&*key).unwrap()) - } - _ => None, - } - } -} diff --git a/src/iterator/secondary_iterator_start_with.rs b/src/iterator/secondary_iterator_start_with.rs deleted file mode 100644 index b6d59738..00000000 --- a/src/iterator/secondary_iterator_start_with.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::common::unwrap_item; -use crate::SDBItem; -use redb::ReadableTable; -use std::marker::PhantomData; - -/// Same as [`PrimaryIterator`](crate::PrimaryIterator) but only returns values with secondary keys that start with the given -/// prefix. 
-pub struct SecondaryIteratorStartWith< - 'a, - 'txn, - 'db, - T: SDBItem, - MT: ReadableTable<&'static [u8], &'static [u8]>, -> { - pub(crate) range: redb::Range<'a, &'static [u8], &'static [u8]>, - pub(crate) start_with: &'a [u8], - pub(crate) main_table: &'a MT, - pub(crate) _marker: PhantomData<(&'db (), &'txn (), T)>, -} - -impl<'a, 'txn, 'db, T: SDBItem, MT: ReadableTable<&'static [u8], &'static [u8]>> Iterator - for SecondaryIteratorStartWith<'a, 'txn, 'db, T, MT> -{ - type Item = T; - - fn next(&mut self) -> Option { - match self.range.next() { - Some(Ok((k, v))) => { - let k = k.value(); - if k.starts_with(self.start_with) { - let key: Vec = v.value().into(); - unwrap_item(self.main_table.get(&*key).unwrap()) - } else { - None - } - } - _ => None, - } - } -} - -// TODO: Found a way to implement DoubleEndedIterator for StructDBIteratorStartWithByKey diff --git a/src/lib.rs b/src/lib.rs index 6b64c02a..4bb09ca0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,191 +1,26 @@ -//! Struct DB is a Rust library that provides a simple, fast, and embedded database solution, +//! Native DB is a Rust library that provides a simple, fast, and embedded database solution, //! focusing on maintaining coherence between Rust types and stored data with minimal boilerplate. -//! It supports multiple indexes, real-time watch with filters, schema migration. -//! -//! Use macro `struct_db`: -//! -//! - required: `pk()` associates a function of the struct that generates the **primary key** of the struct. Allows **only one** `pk` declaration. -//! - optional: `gk()` associates a function of the struct that generates a **secondary key** of the struct. Allows **multiple** `gk` declarations. -//! -//! `struct_db` generates an enum `` with the suffix `Key` that contains all the secondary keys like: E.g. `Key::` more details [`here`](crate::ReadableTable::secondary_get). -//! -//! ## API -//! - Initialize a database: -//! 
- [`Db::init_tmp()`](crate::Db::create_tmp) initializes a database at a temporary path. -//! - [`Db::init()`](crate::Db::create) initializes a database at a given path. -//! - Define schema -//! - [`db.define::()`](crate::Db::define) initializes a schema. -//! - Transactions -//! - [`db.transaction()`](crate::Db::transaction) starts a [`read-write transaction`](crate::Transaction). -//! - [`db.read_transaction()`](crate::Db::read_transaction) starts a [`read-only transaction`](crate::ReadOnlyTransaction). -//! - Tables (`txn` is a [`Transaction`](crate::Transaction) and `read_only_txn` a [`ReadOnlyTransaction`](crate::ReadOnlyTransaction)) -//! - [`txn.tables()`](crate::Transaction::tables) returns a [`Tables`](crate::Tables) -//! - [`read_only_txn.tables()`](crate::ReadOnlyTransaction::tables) returns a [`ReadOnlyTables`](crate::ReadOnlyTables). -//! - Write operations -//! - [`tables.insert(&txn,)`](crate::Tables::insert) inserts an item into the database. -//! - [`tables.update(&txn,, )`](crate::Tables::update) updates an item in the database. -//! - [`tables.remove(&txn,)`](crate::Tables::remove) removes an item from the database. -//! - [`tables.migrate::(&txn)`](crate::Tables::migrate) migrates the schema from `old_type` to `new_type`. -//! - Read operations -//! - Primary key -//! - [`tables.primary_get(&txn,)`](crate::ReadableTable::primary_get) get an item. -//! - [`tables.primary_iter(&txn)`](crate::ReadableTable::primary_iter) iterate all items. -//! - [`tables.primary_iter_range(&txn,..)`](crate::ReadableTable::primary_iter_range) all items in range. -//! - [`tables.primary_iter_start_with(&txn,)`](crate::ReadableTable::primary_iter_start_with) all items with prefix. -//! - Secondary key -//! - [`tables.secondary_get(&txn,,)`](crate::ReadableTable::secondary_get) get an item. -//! - [`tables.secondary_iter(&txn,,)`](crate::ReadableTable::secondary_iter) iterate all items. -//! 
- [`tables.secondary_iter_range(&txn,,..)`](crate::ReadableTable::secondary_iter_range) all items in range. -//! - [`tables.secondary_iter_start_with(&txn,,)`](crate::ReadableTable::secondary_iter_start_with) all items with prefix. -//! - Global -//! - [`tables.len::()`](crate::ReadableTable::len) -//! - Watch (details [`here`](crate::Db::primary_watch)) -//! - Primary key -//! - [`db.primary_watch(Option)`](crate::Db::primary_watch) watch all or a specific item. -//! - [`db.primary_watch_start_with()`](crate::Db::primary_watch_start_with) watch all items with prefix. -//! - Secondary key -//! - [`db.secondary_watch(,Option)`](crate::Db::secondary_watch) watch all or a specific item. -//! - [`db.secondary_watch_start_with(,)`](crate::Db::secondary_watch_start_with) watch all items with prefix. -//! - Global -//! - [`db.unwatch()`](crate::Db::unwatch) stop watching a specific watcher. -//! # Example -//! ``` -//! use serde::{Deserialize, Serialize}; -//! use struct_db::*; -//! -//! #[derive(Serialize, Deserialize, PartialEq, Debug)] -//! #[struct_db( -//! pk = p_key, // required -//! gk = s_key, // optional -//! // ... other gk ... -//! )] -//! struct Data(u32, String); -//! -//! impl Data { -//! // Returns primary key as big-endian bytes for consistent lexicographical ordering. -//! pub fn p_key(&self) -> Vec { -//! self.0.to_be_bytes().to_vec() -//! } -//! -//! // Generates a secondary key combining the String field and the big-endian bytes of -//! // the primary key for versatile queries. -//! pub fn s_key(&self) -> Vec { -//! let mut s_key = self.1.as_bytes().to_vec(); -//! s_key.extend_from_slice(&self.p_key().as_slice()); -//! s_key -//! } -//! } -//! -//! fn main() { -//! let mut db = Db::create_tmp("my_db_example").unwrap(); -//! // Initialize the schema -//! db.define::(); -//! -//! // Insert data -//! let txn = db.transaction().unwrap(); -//! { -//! let mut tables = txn.tables(); -//! tables.insert(&txn, Data(1,"red".to_string())).unwrap(); -//! 
tables.insert(&txn, Data(2,"red".to_string())).unwrap(); -//! tables.insert(&txn, Data(3,"blue".to_string())).unwrap(); -//! } -//! txn.commit().unwrap(); -//! -//! let txn_read = db.read_transaction().unwrap(); -//! let mut tables = txn_read.tables(); -//! -//! // Retrieve data with p_key=3 -//! let retrieve_data: Data = tables.primary_get(&txn_read, &3_u32.to_be_bytes()).unwrap().unwrap(); -//! println!("data p_key='3' : {:?}", retrieve_data); -//! -//! // Iterate data with s_key="red" String -//! for item in tables.secondary_iter_start_with::(&txn_read, DataKey::s_key, "red".as_bytes()).unwrap() { -//! println!("data s_key='1': {:?}", item); -//! } -//! -//! // Remove data -//! let txn = db.transaction().unwrap(); -//! { -//! let mut tables = txn.tables(); -//! tables.remove(&txn, retrieve_data).unwrap(); -//! } -//! txn.commit().unwrap(); -//! } -//! ``` +//! It supports multiple indexes, real-time watch with filters, model migration. mod builder; -mod common; -mod db; -mod item; -mod iterator; -mod operation; -mod query; -mod readable_table; -mod readonly_tables; -mod readonly_transaction; -mod schema; +mod database; +pub mod db_type; +mod model; mod serialization; +mod snapshot; mod stats; mod table_definition; -mod tables; -mod transaction; +pub mod transaction; pub mod watch; +// Re-export +pub use db_type::InnerKeyValue; +pub use db_type::Input; + +// Export pub use builder::*; -pub use db::*; -pub use item::*; -pub use iterator::*; -pub use readable_table::*; -pub use readonly_tables::*; -pub use readonly_transaction::*; -pub use schema::*; +pub use database::*; +pub use model::*; +pub use native_db_macro::*; +pub use native_db_macro::*; pub use serialization::*; -pub use struct_db_macro::*; -pub use tables::*; -pub use transaction::*; - -use thiserror::Error; - -#[derive(Debug, Error)] -pub enum Error { - #[error("Redb error")] - Redb(#[from] redb::Error), - - #[error("Redb database error")] - RedbDatabaseError(#[from] redb::DatabaseError), - - 
#[error("Redb transaction error")] - RedbTransactionError(#[from] redb::TransactionError), - - #[error("Redb storage error")] - RedbStorageError(#[from] redb::StorageError), - - #[error("Redb table error")] - RedbTableError(#[from] redb::TableError), - - #[error("Redb commit error")] - RedbCommitError(#[from] redb::CommitError), - - #[error("IO error")] - Io(#[from] std::io::Error), - - #[error("Table definition not found {table}")] - TableDefinitionNotFound { table: String }, - - #[error("Key not found {key:?}")] - KeyNotFound { key: Vec }, - - #[error("Primary key associated with the secondary key not found {primary_key:?}")] - PrimaryKeyNotFound { primary_key: Vec }, - - #[error("Duplicate key for \"{key_name}\"")] - DuplicateKey { key_name: &'static str }, - - #[error("Watch event error")] - WatchEventError(#[from] watch::WatchEventError), - - #[error("Max watcher reached (should be impossible)")] - MaxWatcherReached, -} - -pub type Result = std::result::Result; diff --git a/src/model.rs b/src/model.rs new file mode 100644 index 00000000..0b5ffd37 --- /dev/null +++ b/src/model.rs @@ -0,0 +1,38 @@ +use crate::db_type::{DatabaseKeyDefinition, DatabaseSecondaryKeyOptions, Error, Result}; +use std::collections::HashSet; + +/// Model of the Item. Returned by the [`::native_db_model()`](crate::Input::native_db_model) method. 
+#[derive(Clone, Debug)] +pub struct Model { + pub primary_key: DatabaseKeyDefinition<()>, + pub secondary_keys: HashSet>, +} + +impl Model { + pub fn check_secondary_options( + &self, + secondary_key: &DatabaseKeyDefinition, + check: F, + ) -> Result<()> + where + F: Fn(DatabaseSecondaryKeyOptions) -> bool, + { + let key = self + .secondary_keys + .get(secondary_key.into()) + .ok_or_else(|| Error::SecondaryKeyDefinitionNotFound { + table: self.primary_key.unique_table_name.to_string(), + key: secondary_key.unique_table_name.clone(), + })?; + + if check(key.options.clone()) { + Ok(()) + } else { + Err(Error::SecondaryKeyConstraintMismatch { + table: self.primary_key.unique_table_name.to_string(), + key: secondary_key.unique_table_name.clone(), + got: key.options.clone(), + }) + } + } +} diff --git a/src/operation/mod.rs b/src/operation/mod.rs deleted file mode 100644 index d692799d..00000000 --- a/src/operation/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod write; diff --git a/src/operation/write/mod.rs b/src/operation/write/mod.rs deleted file mode 100644 index 80c5380c..00000000 --- a/src/operation/write/mod.rs +++ /dev/null @@ -1,104 +0,0 @@ -use crate::item::BinaryValue; -use crate::watch::WatcherRequest; -use crate::{Error, Item, Schema, Tables}; -use crate::{ReadableTable, Result, Transaction}; -use std::collections::{HashMap, HashSet}; -use std::ops::{Bound, RangeBounds}; - -impl<'db, 'txn> Tables<'db, 'txn> { - pub(crate) fn internal_primary_drain<'a>( - &mut self, - txn: &'txn Transaction<'db>, - primary_table_name: &'static str, - range_value: impl RangeBounds<&'a [u8]> + 'a + Copy, - ) -> Result> { - let mut items = vec![]; - let mut key_items = HashSet::new(); - { - self.open_primary_table(txn, primary_table_name)?; - let primary_table = self.opened_tables.get_mut(primary_table_name).unwrap(); - // Drain primary table - let drain = primary_table.drain::<&'_ [u8]>(range_value)?; - for result in drain { - let (primary_key, value) = result?; - // TODO: we 
should delay to an drain iterator - let binary_value = BinaryValue(value.value().to_vec()); - key_items.insert(primary_key.value().to_vec()); - items.push(binary_value); - } - } - - let secondary_table_names: Vec<&str> = self - .table_definitions - .get(primary_table_name) - .ok_or(Error::TableDefinitionNotFound { - table: primary_table_name.to_string(), - })? - .secondary_tables - .iter() - .map(|(name, _)| *name) - .collect(); - - // Drain secondary tables - for secondary_table_name in secondary_table_names { - self.open_secondary_table(txn, primary_table_name, secondary_table_name)?; - use redb::ReadableTable; - let secondary_table = self.opened_tables.get_mut(secondary_table_name).unwrap(); - - // Detect secondary keys to delete - let mut secondary_keys_to_delete = vec![]; - let mut number_detected_key_to_delete = key_items.len(); - for secondary_items in secondary_table.iter()? { - // Ta avoid to iter on all secondary keys if we have already detected all keys to delete - if number_detected_key_to_delete == 0 { - break; - } - let (secondary_key, primary_key) = secondary_items?; - if key_items.contains(primary_key.value()) { - secondary_keys_to_delete.push(secondary_key.value().to_vec()); - number_detected_key_to_delete -= 1; - } - } - - // Delete secondary keys - for secondary_key in secondary_keys_to_delete { - secondary_table.remove(secondary_key.as_slice())?; - } - } - - Ok(items) - } - - pub(crate) fn internal_insert( - &mut self, - txn: &'txn Transaction<'db>, - schema: Schema, - item: Item, - ) -> Result<(WatcherRequest, BinaryValue)> { - let already_exists; - { - self.open_primary_table(txn, schema.table_name)?; - let table = self.opened_tables.get_mut(schema.table_name).unwrap(); - already_exists = table - .insert(item.primary_key.as_slice(), item.value.as_slice())? 
- .is_some(); - } - - for (secondary_table_name, key) in &item.secondary_keys { - self.open_secondary_table(txn, schema.table_name, secondary_table_name)?; - let secondary_table = self.opened_tables.get_mut(secondary_table_name).unwrap(); - let result = secondary_table.insert(key.as_slice(), item.primary_key.as_slice())?; - if result.is_some() && !already_exists { - return Err(crate::Error::DuplicateKey { - key_name: secondary_table_name, - } - .into()); - } - } - - Ok(( - WatcherRequest::new(schema.table_name, item.primary_key, item.secondary_keys), - BinaryValue(item.value), - )) - } -} diff --git a/src/query/mod.rs b/src/query/mod.rs deleted file mode 100644 index 0dbea30f..00000000 --- a/src/query/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -enum QueryGet { - Pk(String), - Dk(String, String), - Gk(String), -} - -enum QueryIter { - Pk(String), - Dk(String), - Gk(String), -} diff --git a/src/readable_table.rs b/src/readable_table.rs deleted file mode 100644 index 58ed9236..00000000 --- a/src/readable_table.rs +++ /dev/null @@ -1,371 +0,0 @@ -use crate::common::unwrap_item; -use crate::Error::TableDefinitionNotFound; -use crate::PrimaryIterator; -use crate::{ - KeyDefinition, PrimaryIteratorStartWith, Result, SDBItem, SecondaryIterator, - SecondaryIteratorStartWith, -}; -use redb::ReadableTable as RedbReadableTable; -use std::marker::PhantomData; -use std::ops::RangeBounds; - -pub trait ReadableTable<'db, 'txn> { - type Table: redb::ReadableTable<&'static [u8], &'static [u8]>; - type Transaction<'x>; - - fn open_primary_table( - &mut self, - txn: &'txn Self::Transaction<'db>, - table_name: &'static str, - ) -> Result<()>; - - fn open_secondary_table( - &mut self, - txn: &'txn Self::Transaction<'db>, - primary_table_name: &'static str, - secondary_table_name: &'static str, - ) -> Result<()>; - - fn get_table(&self, table_name: &'static str) -> Option<&Self::Table>; - - /// Get a value from the table. - /// Returns `Ok(None)` if the key does not exist. 
- /// Available in [`Tables`](crate::Tables) and [`ReadOnlyTables`](crate::ReadOnlyTables). - /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] - /// #[struct_db(pk = p_key)] - /// struct Data(u32); - /// impl Data {pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()}} - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db_rt_g").unwrap(); - /// // Initialize the table - /// db.define::(); - /// - /// // Insert a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, Data(1)).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Get a value from the table - /// let txn_read = db.read_transaction().unwrap(); - /// let mut tables = txn_read.tables(); - /// - /// // Using explicit type (turbofish syntax) - /// let value = tables.primary_get::(&txn_read, &1u32.to_be_bytes()); - /// - /// // Using type inference - /// let value: Option = tables.primary_get(&txn_read, &1u32.to_be_bytes()).unwrap(); - /// } - fn primary_get( - &mut self, - txn: &'txn Self::Transaction<'db>, - key: &[u8], - ) -> Result> { - let table_name = T::struct_db_schema().table_name; - self.open_primary_table(txn, table_name)?; - let table = self.get_table(table_name).unwrap(); - let item = table.get(key)?; - Ok(unwrap_item(item)) - } - - /// Iterate over all the values of the table. - /// Available in [`Tables`](crate::Tables) and [`ReadOnlyTables`](crate::ReadOnlyTables). 
- /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] - /// #[struct_db(pk = p_key)] - /// struct Data(u32); - /// impl Data{ pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()} } - /// - /// fn main() { - /// use std::arch::asm; - /// let mut db = Db::create_tmp("my_db_p_iter").unwrap(); - /// // Initialize the table - /// db.define::(); - /// - /// // Insert a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, Data(1)).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Iterate over all the values of the table - /// let txn_read = db.read_transaction().unwrap(); - /// let mut tables = txn_read.tables(); - /// - /// for value in tables.primary_iter::(&txn_read).unwrap() { - /// assert_eq!(value, Data(1)); - /// } - /// } - fn primary_iter<'a, T: SDBItem>( - &'a mut self, - txn: &'txn Self::Transaction<'db>, - ) -> Result> - where - 'db: 'a, - 'txn: 'a, - { - self.primary_iter_range(txn, ..) - } - - /// Iterate over all the values of the table that are in the given range. - /// Available in [`Tables`](crate::Tables) and [`ReadOnlyTables`](crate::ReadOnlyTables). - /// - /// # Example - /// - Similar to [`primary_iter`](ReadableTable::primary_iter) but with a range. - /// - See tests/09_iterator.rs for more examples. 
- fn primary_iter_range<'a, 'b, T>( - &'a mut self, - txn: &'txn Self::Transaction<'db>, - range_value: impl RangeBounds<&'a [u8]> + 'a, - ) -> Result> - where - T: SDBItem, - 'db: 'a, - 'txn: 'a, - { - let table_name = T::struct_db_schema().table_name; - self.open_primary_table(txn, table_name)?; - let table = self.get_table(table_name).unwrap(); - let range = table.range::<&'_ [u8]>(range_value)?; - Ok(PrimaryIterator { - range, - _marker: PhantomData, - }) - } - - /// Iterate over all the values of the table that start with the given prefix. - /// Available in [`Tables`](crate::Tables) and [`ReadOnlyTables`](crate::ReadOnlyTables). - /// - /// # Example - /// - Similar to [`primary_iter`](ReadableTable::primary_iter) but with a prefix. - /// - See tests/09_iterator.rs for more examples. - fn primary_iter_start_with<'a, T>( - &'a mut self, - txn: &'txn Self::Transaction<'db>, - prefix_value: &'a [u8], - ) -> Result> - where - T: SDBItem, - 'db: 'a, - 'txn: 'a, - { - let table_name = T::struct_db_schema().table_name; - self.open_primary_table(txn, table_name)?; - let table = self.get_table(table_name).unwrap(); - let range = table.range::<&'_ [u8]>(prefix_value..)?; - Ok(PrimaryIteratorStartWith { - range, - start_with: prefix_value, - _marker: PhantomData, - }) - } - - /// Get a value from the table using a secondary key. - /// Returns `Ok(None)` if the key does not exist. - /// Available in [`Tables`](crate::Tables) and [`ReadOnlyTables`](crate::ReadOnlyTables). - /// - /// Set the key_definition: use the `Key` enum generated by the `struct_db` - /// macro to specify the key. Like this: `Key::`. 
- /// - /// E.g: `tables.get_by_key(&txn_read, Key::, &your_key)` - /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] - /// #[struct_db(pk = p_key,gk = s_key)] - /// struct Data(u32, String); - /// impl Data { - /// pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()} - /// pub fn s_key(&self) -> Vec {self.1.as_bytes().to_vec()} - /// } - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db_rt_gk").unwrap(); - /// // Initialize the table - /// db.define::(); - /// - /// // Insert a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, Data(1, "hello".to_string())).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Get a value from the table - /// let txn_read = db.read_transaction().unwrap(); - /// let mut tables = txn_read.tables(); - /// // Using explicit type (turbofish syntax) - /// let value = tables.secondary_get::(&txn_read, DataKey::s_key, &"hello".as_bytes()); - /// - /// // Using type inference - /// let value: Option = tables.secondary_get(&txn_read, DataKey::s_key, &"hello".as_bytes()).unwrap(); - /// } - fn secondary_get( - &mut self, - txn: &'txn Self::Transaction<'db>, - key_def: impl KeyDefinition, - key: &[u8], - ) -> Result> { - let table_name = key_def.secondary_table_name(); - - let primary_key: Vec = { - self.open_secondary_table(txn, T::struct_db_schema().table_name, table_name)?; - let table = self.get_table(table_name).unwrap(); - let value = table.get(key)?; - if let Some(value) = value { - value.value().into() - } else { - return Ok(None); - } - }; - - Ok(Some(self.primary_get(txn, &primary_key)?.ok_or( - crate::Error::PrimaryKeyNotFound { - primary_key: primary_key.to_vec(), - }, - )?)) - } - - /// Iterate over all the values of the table that start with the given prefix. 
- /// Available in [`Tables`](crate::Tables) and [`ReadOnlyTables`](crate::ReadOnlyTables). - /// - /// # Example - /// - Similar to [`primary_iter`](ReadableTable::primary_iter) but with a prefix. - /// - See [`get_by_key`](crate::Tables::secondary_get) too know how to set the key_definition. - /// - See tests/09_iterator.rs for more examples. - fn secondary_iter<'a, T: SDBItem>( - &mut self, - txn: &'txn Self::Transaction<'db>, - key_def: impl KeyDefinition, - ) -> Result> { - self.secondary_iter_range(txn, key_def, ..) - } - - /// Iterate over all the values of the table that start with the given prefix. - /// Available in [`Tables`](crate::Tables) and [`ReadOnlyTables`](crate::ReadOnlyTables). - /// - /// # Example - /// - Similar to [`primary_iter`](ReadableTable::primary_iter) but with a prefix. - /// - See [`get_by_key`](crate::Tables::secondary_get) too know how to set the key_definition. - /// - See tests/09_iterator.rs for more examples. - fn secondary_iter_range<'a, 'b, T>( - &'a mut self, - txn: &'txn Self::Transaction<'db>, - key_def: impl KeyDefinition, - range_key: impl RangeBounds<&'b [u8]> + 'b, - ) -> Result> - where - T: SDBItem, - 'a: 'b, - { - let main_table_name = T::struct_db_schema().table_name; - self.open_primary_table(txn, main_table_name)?; - let secondary_table_name = key_def.secondary_table_name(); - self.open_secondary_table(txn, main_table_name, secondary_table_name)?; - - let main_table = self.get_table(main_table_name).unwrap(); - let secondary_table = self.get_table(secondary_table_name).unwrap(); - let range = secondary_table.range::<&'_ [u8]>(range_key)?; - - Ok(SecondaryIterator { - range, - main_table, - _marker: PhantomData, - }) - } - - /// Iterate over all the values of the table that start with the given prefix. - /// Available in [`Tables`](crate::Tables) and [`ReadOnlyTables`](crate::ReadOnlyTables). - /// - /// # Example - /// - Similar to [`primary_iter`](ReadableTable::primary_iter) but with a prefix. 
- /// - See [`get_by_key`](crate::Tables::secondary_get) too know how to set the key_definition. - /// - See tests/09_iterator.rs for more examples. - fn secondary_iter_start_with<'a, 'b, T>( - &'a mut self, - txn: &'txn Self::Transaction<'db>, - key_def: impl KeyDefinition, - key_prefix: &'b [u8], - ) -> Result> - where - T: SDBItem, - 'b: 'a, - { - let main_table_name = T::struct_db_schema().table_name; - self.open_primary_table(txn, main_table_name)?; - let secondary_table_name = key_def.secondary_table_name(); - self.open_secondary_table(txn, main_table_name, secondary_table_name)?; - - let main_table = self.get_table(main_table_name).unwrap(); - let secondary_table = self.get_table(secondary_table_name).unwrap(); - let range = secondary_table.range::<&'_ [u8]>(key_prefix..)?; - - Ok(SecondaryIteratorStartWith { - range, - start_with: key_prefix, - main_table, - _marker: PhantomData, - }) - } - - /// Returns the number of elements in the table. - /// Available in [`Tables`](crate::Tables) and [`ReadOnlyTables`](crate::ReadOnlyTables). 
- /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] - /// #[struct_db(pk = p_key)] - /// struct Data(u32); - /// impl Data{ pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()} } - /// - /// fn main() { - /// use std::arch::asm; - /// let mut db = Db::create_tmp("my_db_len").unwrap(); - /// // Initialize the table - /// db.define::(); - /// - /// // Insert a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, Data(1)).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Get the number of elements - /// let txn_read = db.read_transaction().unwrap(); - /// let mut tables = txn_read.tables(); - /// let len = tables.len::(&txn_read).unwrap(); - /// assert_eq!(len, 1); - /// } - fn len(&mut self, txn: &'txn Self::Transaction<'db>) -> Result { - let table_name = T::struct_db_schema().table_name; - self.open_primary_table(txn, table_name)?; - let table = self.get_table(table_name).unwrap(); - let result = table.len()?; - Ok(result) - } -} diff --git a/src/readonly_tables.rs b/src/readonly_tables.rs deleted file mode 100644 index 09a9cfa0..00000000 --- a/src/readonly_tables.rs +++ /dev/null @@ -1,69 +0,0 @@ -use crate::table_definition::PrimaryTableDefinition; -use crate::Error::TableDefinitionNotFound; -use crate::Result; -use crate::{ReadOnlyTransaction, ReadableTable}; -use std::collections::HashMap; - -/// A collection of read-only tables. Only read operations available through the [`ReadableTable`](crate::ReadableTable) trait -/// are allowed. 
-pub struct ReadOnlyTables<'db, 'txn> { - pub(crate) table_definitions: &'db HashMap<&'static str, PrimaryTableDefinition>, - pub(crate) opened_read_only_tables: - HashMap<&'static str, redb::ReadOnlyTable<'txn, &'static [u8], &'static [u8]>>, -} - -impl<'db, 'txn> ReadableTable<'db, 'txn> for ReadOnlyTables<'db, 'txn> { - type Table = redb::ReadOnlyTable<'txn, &'static [u8], &'static [u8]>; - type Transaction<'x> = ReadOnlyTransaction<'db>; - - fn open_primary_table( - &mut self, - txn: &'txn Self::Transaction<'db>, - table_name: &'static str, - ) -> Result<()> { - let table = self - .table_definitions - .get(table_name) - .ok_or(TableDefinitionNotFound { - table: table_name.to_string(), - })?; - if !self.opened_read_only_tables.contains_key(table_name) { - let table = txn.txn.open_table(table.redb)?; - self.opened_read_only_tables.insert(table_name, table); - } - Ok(()) - } - - fn open_secondary_table( - &mut self, - txn: &'txn Self::Transaction<'db>, - primary_table_name: &'static str, - secondary_table_name: &'static str, - ) -> Result<()> { - let primary_table = - self.table_definitions - .get(primary_table_name) - .ok_or(TableDefinitionNotFound { - table: primary_table_name.to_string(), - })?; - let secondary_table = primary_table - .secondary_tables - .get(secondary_table_name) - .ok_or(TableDefinitionNotFound { - table: secondary_table_name.to_string(), - })?; - if !self - .opened_read_only_tables - .contains_key(secondary_table_name) - { - let table = txn.txn.open_table(secondary_table.rdb())?; - self.opened_read_only_tables - .insert(secondary_table_name, table); - } - Ok(()) - } - - fn get_table(&self, table_name: &'static str) -> Option<&Self::Table> { - self.opened_read_only_tables.get(table_name) - } -} diff --git a/src/readonly_transaction.rs b/src/readonly_transaction.rs deleted file mode 100644 index 04433465..00000000 --- a/src/readonly_transaction.rs +++ /dev/null @@ -1,18 +0,0 @@ -use crate::table_definition::PrimaryTableDefinition; -use 
crate::ReadOnlyTables; -use std::collections::HashMap; - -/// Can open only [`ReadOnlyTables`](crate::ReadOnlyTables). -pub struct ReadOnlyTransaction<'db> { - pub(crate) table_definitions: &'db HashMap<&'static str, PrimaryTableDefinition>, - pub(crate) txn: redb::ReadTransaction<'db>, -} - -impl<'db> ReadOnlyTransaction<'db> { - pub fn tables<'txn>(&'txn self) -> ReadOnlyTables<'db, 'txn> { - ReadOnlyTables { - table_definitions: self.table_definitions, - opened_read_only_tables: Default::default(), - } - } -} diff --git a/src/schema.rs b/src/schema.rs deleted file mode 100644 index fb3894e4..00000000 --- a/src/schema.rs +++ /dev/null @@ -1,7 +0,0 @@ -/// Schema of the Item. Returned by the [`::struct_db_schema()`](crate::SDBItem::struct_db_schema) method. -#[derive(Clone, Debug)] -pub struct Schema { - pub table_name: &'static str, - pub primary_key: &'static str, - pub secondary_tables_name: std::collections::HashSet<&'static str>, -} diff --git a/src/serialization.rs b/src/serialization.rs index 81e04155..8c2b12f6 100644 --- a/src/serialization.rs +++ b/src/serialization.rs @@ -1,11 +1,3 @@ -#[cfg(not(feature = "native_model"))] -pub fn bincode_encode_to_vec(value: &T) -> Option> -where - T: serde::Serialize, -{ - bincode::serde::encode_to_vec(value, bincode::config::standard()).ok() -} -#[cfg(feature = "native_model")] pub fn bincode_encode_to_vec(value: &T) -> Option> where T: serde::Serialize + native_model::Model, @@ -13,15 +5,6 @@ where native_model::encode(value).ok() } -#[cfg(not(feature = "native_model"))] -pub fn bincode_decode_from_slice(slice: &[u8]) -> Option<(T, usize)> -where - T: serde::de::DeserializeOwned, -{ - bincode::serde::decode_from_slice(slice, bincode::config::standard()).ok() -} - -#[cfg(feature = "native_model")] pub fn bincode_decode_from_slice(slice: &[u8]) -> Option<(T, usize)> where T: serde::de::DeserializeOwned + native_model::Model, diff --git a/src/snapshot.rs b/src/snapshot.rs new file mode 100644 index 00000000..56ec9ca6 
--- /dev/null +++ b/src/snapshot.rs @@ -0,0 +1,36 @@ +use crate::db_type::Result; +use crate::{Database, DatabaseBuilder}; +use redb::ReadableTable; +use std::path::Path; + +impl Database<'_> { + pub fn snapshot<'a>(&self, builder: &'a DatabaseBuilder, path: &Path) -> Result> { + // TODO: builder must have well defined models + let new_db = builder.create(path)?; + let r = self.instance.begin_read()?; + let w = new_db.instance.begin_write()?; + { + // Copy primary tables + for (_, primary_table_definition) in &self.primary_table_definitions { + let table = r.open_table(primary_table_definition.redb)?; + let mut new_table = w.open_table(primary_table_definition.redb)?; + for result in table.iter()? { + let (key, value) = result?; + new_table.insert(key.value(), value.value())?; + } + + // Copy secondary tables + for (_, secondary_table_definition) in &primary_table_definition.secondary_tables { + let table = r.open_table(secondary_table_definition.redb)?; + let mut new_table = w.open_table(secondary_table_definition.redb)?; + for result in table.iter()? 
{ + let (key, value) = result?; + new_table.insert(key.value(), value.value())?; + } + } + } + } + w.commit()?; + Ok(new_db) + } +} diff --git a/src/stats.rs b/src/stats.rs index c12460c1..521b3941 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,10 +1,11 @@ #[derive(Debug)] pub struct Stats { - pub stats_tables: Vec, + pub primary_tables: Vec, + pub secondary_tables: Vec, } #[derive(Debug)] pub struct StatsTable { pub name: String, - pub num_raw: usize, + pub n_entries: Option, } diff --git a/src/table_definition.rs b/src/table_definition.rs index b766cb34..cc759931 100644 --- a/src/table_definition.rs +++ b/src/table_definition.rs @@ -1,20 +1,23 @@ -use crate::{schema, SDBItem}; -use redb::TableHandle; +use crate::builder::ModelBuilder; +use crate::db_type::{DatabaseInnerKeyValue, DatabaseKeyDefinition, DatabaseSecondaryKeyOptions}; use std::collections::HashMap; use std::fmt::Debug; -#[cfg(not(feature = "native_model"))] -pub(crate) struct PrimaryTableDefinition { - pub(crate) schema: crate::Schema, - pub(crate) redb: redb::TableDefinition<'static, &'static [u8], &'static [u8]>, - pub(crate) secondary_tables: HashMap<&'static str, SecondaryTableDefinition>, +pub(crate) type RedbPrimaryTableDefinition<'a> = + redb::TableDefinition<'a, DatabaseInnerKeyValue, &'static [u8]>; +pub(crate) type RedbSecondaryTableDefinition<'a> = + redb::TableDefinition<'a, DatabaseInnerKeyValue, DatabaseInnerKeyValue>; + +pub struct PrimaryTableDefinition<'a> { + pub(crate) model: crate::Model, + pub(crate) redb: RedbPrimaryTableDefinition<'a>, + pub(crate) secondary_tables: + HashMap, SecondaryTableDefinition<'a>>, + pub(crate) native_model_options: NativeModelOptions, } -#[cfg(feature = "native_model")] -pub(crate) struct PrimaryTableDefinition { - pub(crate) schema: crate::Schema, - pub(crate) redb: redb::TableDefinition<'static, &'static [u8], &'static [u8]>, - pub(crate) secondary_tables: HashMap<&'static str, SecondaryTableDefinition>, +#[derive(Clone, Debug)] +pub struct 
NativeModelOptions { pub(crate) native_model_id: u32, pub(crate) native_model_version: u32, // If a model as a new version, the old version is still available but marked as legacy. @@ -23,39 +26,9 @@ pub(crate) struct PrimaryTableDefinition { pub(crate) native_model_legacy: bool, } -impl - From<( - schema::Schema, - redb::TableDefinition<'static, &'static [u8], &'static [u8]>, - )> for PrimaryTableDefinition -{ - #[cfg(not(feature = "native_model"))] - fn from( - input: ( - schema::Schema, - redb::TableDefinition<'static, &'static [u8], &'static [u8]>, - ), - ) -> Self { - let (schema, redb) = input; - Self { - schema, - redb, - secondary_tables: HashMap::new(), - } - } - - #[cfg(feature = "native_model")] - fn from( - input: ( - schema::Schema, - redb::TableDefinition<'static, &'static [u8], &'static [u8]>, - ), - ) -> Self { - let (schema, redb) = input; +impl Default for NativeModelOptions { + fn default() -> Self { Self { - schema, - redb, - secondary_tables: HashMap::new(), native_model_id: 0, native_model_version: 0, native_model_legacy: false, @@ -63,41 +36,40 @@ impl } } -#[cfg(feature = "native_model")] -impl Debug for PrimaryTableDefinition { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TableDefinition") - .field("name", &self.redb.name()) - .field("model_id", &self.native_model_id) - .field("model_version", &self.native_model_version) - .field("legacy", &self.native_model_legacy) - .finish() +impl<'a> From<(&ModelBuilder, RedbPrimaryTableDefinition<'a>)> for PrimaryTableDefinition<'a> { + fn from(input: (&ModelBuilder, RedbPrimaryTableDefinition<'a>)) -> Self { + let (builder, redb) = input; + Self { + model: builder.model.clone(), + redb, + secondary_tables: HashMap::new(), + native_model_options: builder.native_model_options.clone(), + } } } -#[cfg(not(feature = "native_model"))] -impl Debug for PrimaryTableDefinition { +impl Debug for PrimaryTableDefinition<'_> { fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { + use redb::TableHandle; f.debug_struct("TableDefinition") .field("name", &self.redb.name()) + .field("model_id", &self.native_model_options.native_model_id) + .field( + "model_version", + &self.native_model_options.native_model_version, + ) + .field("legacy", &self.native_model_options.native_model_legacy) .finish() } } -pub(crate) struct SecondaryTableDefinition { - pub(crate) rdb: redb::TableDefinition<'static, &'static [u8], &'static [u8]>, -} - -impl From> - for SecondaryTableDefinition -{ - fn from(rdb: redb::TableDefinition<'static, &'static [u8], &'static [u8]>) -> Self { - Self { rdb } - } +#[derive(Clone)] +pub(crate) struct SecondaryTableDefinition<'a> { + pub(crate) redb: RedbSecondaryTableDefinition<'a>, } -impl SecondaryTableDefinition { - pub(crate) fn rdb(&self) -> redb::TableDefinition<'static, &'static [u8], &'static [u8]> { - self.rdb +impl<'a> From> for SecondaryTableDefinition<'a> { + fn from(rdb: RedbSecondaryTableDefinition<'a>) -> SecondaryTableDefinition<'a> { + Self { redb: rdb } } } diff --git a/src/tables.rs b/src/tables.rs deleted file mode 100644 index 9f34fff3..00000000 --- a/src/tables.rs +++ /dev/null @@ -1,357 +0,0 @@ -use crate::item::{BinaryValue, Item}; -use crate::table_definition::PrimaryTableDefinition; -use crate::watch; -use crate::watch::{Event, WatcherRequest}; -use crate::Error::TableDefinitionNotFound; -use crate::Result; -use crate::{ReadableTable, SDBItem, Transaction}; -use std::cell::RefCell; -use std::collections::HashMap; -use std::ops::RangeBounds; - -/// A collection of read-write tables. Read operation from [`ReadableTable`](crate::ReadableTable) -/// and write operations [`insert`](crate::Tables::insert), [`update`](crate::Tables::update), [`remove`](crate::Tables::remove) -/// and [`migrate`](crate::Tables::migrate) are available. 
-pub struct Tables<'db, 'txn> { - pub(crate) table_definitions: &'db HashMap<&'static str, PrimaryTableDefinition>, - pub(crate) opened_tables: - HashMap<&'static str, redb::Table<'db, 'txn, &'static [u8], &'static [u8]>>, - pub(crate) batch: &'txn RefCell, -} - -impl<'db, 'txn> ReadableTable<'db, 'txn> for Tables<'db, 'txn> { - type Table = redb::Table<'db, 'txn, &'static [u8], &'static [u8]>; - type Transaction<'x> = Transaction<'db>; - - fn open_primary_table( - &mut self, - txn: &'txn Self::Transaction<'db>, - primary_table_name: &'static str, - ) -> Result<()> { - let table = - self.table_definitions - .get(primary_table_name) - .ok_or(TableDefinitionNotFound { - table: primary_table_name.to_string(), - })?; - if !self.opened_tables.contains_key(primary_table_name) { - let table = txn.txn.open_table(table.redb)?; - self.opened_tables.insert(primary_table_name, table); - } - Ok(()) - } - - fn open_secondary_table( - &mut self, - txn: &'txn Self::Transaction<'db>, - primary_table_name: &'static str, - secondary_table_name: &'static str, - ) -> Result<()> { - let primary_table = - self.table_definitions - .get(primary_table_name) - .ok_or(TableDefinitionNotFound { - table: primary_table_name.to_string(), - })?; - let secondary_table = primary_table - .secondary_tables - .get(secondary_table_name) - .ok_or(TableDefinitionNotFound { - table: secondary_table_name.to_string(), - })?; - if !self.opened_tables.contains_key(secondary_table_name) { - let table = txn.txn.open_table(secondary_table.rdb())?; - self.opened_tables.insert(secondary_table_name, table); - } - Ok(()) - } - - fn get_table(&self, table_name: &'static str) -> Option<&Self::Table> { - self.opened_tables.get(table_name) - } -} - -impl<'db, 'txn> Tables<'db, 'txn> { - /// Insert data into the database. - /// - /// Send a [`event::Insert`](watch::Insert) event that you can - /// receive using [`watch`](crate::Db::primary_watch) or others `watch_*` functions. 
- /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] - /// #[struct_db(pk = p_key,gk = s_key)] - /// struct Data(u32, String); - /// impl Data { - /// pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()} - /// pub fn s_key(&self) -> Vec {self.1.as_bytes().to_vec()} - /// } - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db_t_insert").unwrap(); - /// // Initialize the table - /// db.define::(); - /// - /// // Insert a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, Data(1, "hello".to_string())).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// } - pub fn insert(&mut self, txn: &'txn Transaction<'db>, item: T) -> Result<()> { - let (watcher_request, binary_value) = - self.internal_insert(txn, T::struct_db_schema(), item.to_item())?; - let event = Event::new_insert(binary_value); - self.batch.borrow_mut().add(watcher_request, event); - Ok(()) - } - - // fn internal_insert( - // &mut self, - // txn: &'txn Transaction<'db>, - // item: T, - // ) -> Result<(WatcherRequest, BinaryValue)> { - // let item: Item = item.to_item(); - // let schema = T::struct_db_schema(); - // self.internal_insert_2(txn, schema, item) - // } - - /// Update data in the database. - /// - /// Send a [`event::Update`](watch::Update) event that you can - /// receive using [`watch`](crate::Db::primary_watch) or others `watch_*` functions. 
- /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] - /// #[struct_db(pk = p_key)] - /// struct Data(u32); - /// impl Data{ pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()} } - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db_t_update").unwrap(); - /// // Initialize the table - /// db.define::(); - /// - /// // Insert a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, Data(1)).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Update the data, e.g: increment the value - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// let old_data = tables.primary_get::(&txn, &1u32.to_be_bytes()).unwrap().unwrap(); - /// let new_data = Data(old_data.0 + 1); - /// tables.update(&txn, old_data, new_data).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Get the updated data - /// let mut txn = db.read_transaction().unwrap(); - /// let mut tables = txn.tables(); - /// let data:Data = tables.primary_get(&txn, &2u32.to_be_bytes()).unwrap().unwrap(); - /// assert_eq!(data, Data(2)); - /// } - pub fn update( - &mut self, - txn: &'txn Transaction<'db>, - old_item: T, - updated_item: T, - ) -> Result<()> { - let (_, old_binary_value) = self.internal_remove(txn, old_item)?; - let (watcher_request, new_binary_value) = - self.internal_insert(txn, T::struct_db_schema(), updated_item.to_item())?; - - let event = Event::new_update(old_binary_value, new_binary_value); - self.batch.borrow_mut().add(watcher_request, event); - Ok(()) - } - - /// Remove data from the database. - /// - /// Send a [`event::Delete`](watch::Delete) event that you can - /// receive using [`watch`](crate::Db::primary_watch) or others `watch_*` functions. 
- /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] - /// #[struct_db(pk = p_key)] - /// struct Data(u32); - /// impl Data{ pub fn p_key(&self) -> Vec {self.0.to_be_bytes().to_vec()} } - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db_t_remove").unwrap(); - /// // Initialize the table - /// db.define::(); - /// - /// // Insert a new data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, Data(1)).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Remove the data - /// let mut txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.remove(&txn, Data(1)).unwrap(); - /// } - /// txn.commit().unwrap(); // /!\ Don't forget to commit - /// - /// // Get the removed data - /// let mut txn = db.read_transaction().unwrap(); - /// let mut tables = txn.tables(); - /// let data:Option = tables.primary_get(&txn, &1u32.to_be_bytes()).unwrap(); - /// assert_eq!(data, None); - /// } - pub fn remove(&mut self, txn: &'txn Transaction<'db>, item: T) -> Result<()> { - let (watcher_request, binary_value) = self.internal_remove(txn, item)?; - let event = Event::new_delete(binary_value); - self.batch.borrow_mut().add(watcher_request, event); - Ok(()) - } - - fn internal_remove( - &mut self, - txn: &'txn Transaction<'db>, - item: T, - ) -> Result<(WatcherRequest, BinaryValue)> { - let schema = T::struct_db_schema(); - let table_name = schema.table_name; - - let primary_key = item.struct_db_pk(); - let keys = item.struct_db_gks(); - let value = item.struct_db_bincode_encode_to_vec(); - { - self.open_primary_table(txn, table_name)?; - let table = self.opened_tables.get_mut(table_name).unwrap(); - table.remove(&primary_key.as_slice())?; - } - - for (secondary_table_name, value) in &keys { - self.open_secondary_table(txn, 
table_name, secondary_table_name)?; - let secondary_table = self.opened_tables.get_mut(secondary_table_name).unwrap(); - secondary_table.remove(&value.as_slice())?; - } - - Ok(( - WatcherRequest::new(table_name, primary_key, keys), - BinaryValue(value), - )) - } - - /// Migration from a type to another. - /// - /// Not send any event. - /// - /// # Example - /// ``` - /// use serde::{Deserialize, Serialize}; - /// use struct_db::*; - /// - /// type Data = DataV2; - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] - /// #[struct_db(pk = p_key)] - /// struct DataV1(u32); - /// - /// impl DataV1 { - /// pub fn p_key(&self) -> Vec { - /// self.0.to_be_bytes().to_vec() - /// } - /// } - /// - /// #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] - /// #[struct_db(pk = p_key)] - /// struct DataV2(String); - /// - /// impl DataV2 { - /// pub fn p_key(&self) -> Vec { - /// self.0.as_bytes().to_vec() - /// } - /// } - /// - /// impl From for DataV2 { - /// fn from(av1: DataV1) -> Self { - /// Self(av1.0.to_string()) - /// } - /// } - /// - /// fn main() { - /// let mut db = Db::create_tmp("my_db_t_migration").unwrap(); - /// - /// db.define::(); - /// db.define::(); - /// - /// let data = DataV1(42); - /// - /// let txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.insert(&txn, data).unwrap(); - /// } - /// txn.commit().unwrap(); - /// - /// // Migrate - /// let txn = db.transaction().unwrap(); - /// { - /// let mut tables = txn.tables(); - /// tables.migrate::(&txn).unwrap(); - /// } - /// txn.commit().unwrap(); - /// - /// // Check migration - /// let txn = db.read_transaction().unwrap(); - /// let mut tables = txn.tables(); - /// let data = tables.primary_get::(&txn, "42".as_bytes()).unwrap().unwrap(); - /// println!("migrated data: {:?}", data); - /// } - pub fn migrate(&mut self, txn: &'txn Transaction<'db>) -> Result<()> - where - OldType: SDBItem + Clone, - NewType: SDBItem + From, - { - 
let find_all_old: Vec = self.primary_iter(txn).unwrap().collect(); - for old in find_all_old { - let new: NewType = old.clone().into(); - self.internal_insert(txn, NewType::struct_db_schema(), new.to_item())?; - self.internal_remove(txn, old)?; - } - - Ok(()) - } - - // TODO: rename to drain, add add to argument a range - pub fn primary_drain<'a, T: SDBItem>( - &mut self, - txn: &'txn Transaction<'db>, - range_value: impl RangeBounds<&'a [u8]> + 'a + Copy, - ) -> Result> { - let drained_data = - self.internal_primary_drain(txn, T::struct_db_schema().table_name, range_value)?; - let mut items = vec![]; - for binary_value in drained_data { - let item = T::struct_db_bincode_decode_from_slice(binary_value.0.as_slice()); - items.push(item); - } - Ok(items) - } -} diff --git a/src/transaction.rs b/src/transaction.rs deleted file mode 100644 index a5a98046..00000000 --- a/src/transaction.rs +++ /dev/null @@ -1,32 +0,0 @@ -use crate::table_definition::PrimaryTableDefinition; -use crate::watch; -use crate::{Result, Tables}; -use std::cell::RefCell; -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; - -/// Can open only [`Tables`](crate::Tables). 
-pub struct Transaction<'db> { - pub(crate) table_definitions: &'db HashMap<&'static str, PrimaryTableDefinition>, - pub(crate) txn: redb::WriteTransaction<'db>, - pub(crate) watcher: &'db Arc>, - pub(crate) batch: RefCell, -} - -impl<'db> Transaction<'db> { - pub fn tables<'txn>(&'txn self) -> Tables<'db, 'txn> { - Tables { - table_definitions: self.table_definitions, - opened_tables: HashMap::new(), - batch: &self.batch, - } - } - - pub fn commit(self) -> Result<()> { - self.txn.commit()?; - // Send batch to watchers after commit succeeds - let batch = self.batch.into_inner(); - watch::push_batch(Arc::clone(&self.watcher), batch)?; - Ok(()) - } -} diff --git a/src/transaction/internal/mod.rs b/src/transaction/internal/mod.rs new file mode 100644 index 00000000..6dbad5b4 --- /dev/null +++ b/src/transaction/internal/mod.rs @@ -0,0 +1,3 @@ +pub mod private_readable_transaction; +pub mod r_transaction; +pub mod rw_transaction; diff --git a/src/transaction/internal/private_readable_transaction.rs b/src/transaction/internal/private_readable_transaction.rs new file mode 100644 index 00000000..cea84097 --- /dev/null +++ b/src/transaction/internal/private_readable_transaction.rs @@ -0,0 +1,68 @@ +use crate::db_type::{ + DatabaseInnerKeyValue, DatabaseKeyDefinition, DatabaseOutputValue, DatabaseSecondaryKeyOptions, + Error, InnerKeyValue, KeyDefinition, Result, +}; +use crate::table_definition::PrimaryTableDefinition; +use crate::Model; +use redb::ReadableTable; +use std::collections::HashMap; + +pub trait PrivateReadableTransaction<'db, 'txn> { + type RedbPrimaryTable: ReadableTable; + type RedbSecondaryTable: ReadableTable; + + type RedbTransaction<'db_bis> + where + Self: 'db_bis; + + fn table_definitions(&self) -> &HashMap; + + fn get_primary_table(&'txn self, model: &Model) -> Result; + + fn get_secondary_table( + &'txn self, + model: &Model, + secondary_key: &DatabaseKeyDefinition, + ) -> Result; + + fn get_by_primary_key( + &'txn self, + model: Model, + key: impl 
InnerKeyValue, + ) -> Result> { + let table = self.get_primary_table(&model)?; + let key = key.database_inner_key_value(); + let item = table.get(key)?; + Ok(item.map(|item| item.value().into())) + } + + fn get_by_secondary_key( + &'txn self, + model: Model, + key_def: impl KeyDefinition, + key: impl InnerKeyValue, + ) -> Result> { + let secondary_key = key_def.database_key(); + // Provide a better error for the test of unicity of the secondary key + model.check_secondary_options(&secondary_key, |options| options.unique == true)?; + + let table = self.get_secondary_table(&model, &secondary_key)?; + let value = table.get(key.database_inner_key_value())?; + let primary_key = if let Some(value) = value { + value.value().to_owned() + } else { + return Ok(None); + }; + + Ok(Some( + self.get_by_primary_key(model, primary_key)? + .ok_or(Error::PrimaryKeyNotFound)?, + )) + } + + fn primary_len(&'txn self, model: Model) -> Result { + let table = self.get_primary_table(&model)?; + let result = table.len()?; + Ok(result) + } +} diff --git a/src/transaction/internal/r_transaction.rs b/src/transaction/internal/r_transaction.rs new file mode 100644 index 00000000..617087bd --- /dev/null +++ b/src/transaction/internal/r_transaction.rs @@ -0,0 +1,62 @@ +use crate::db_type::{ + DatabaseInnerKeyValue, DatabaseKeyDefinition, DatabaseSecondaryKeyOptions, Error, Result, +}; +use crate::table_definition::PrimaryTableDefinition; +use crate::transaction::internal::private_readable_transaction::PrivateReadableTransaction; +use crate::Model; +use std::collections::HashMap; + +pub struct InternalRTransaction<'db> { + pub(crate) redb_transaction: redb::ReadTransaction<'db>, + pub(crate) table_definitions: &'db HashMap>, +} + +impl<'db, 'txn> PrivateReadableTransaction<'db, 'txn> for InternalRTransaction<'db> +where + Self: 'txn, + Self: 'db, +{ + type RedbPrimaryTable = redb::ReadOnlyTable<'txn, DatabaseInnerKeyValue, &'static [u8]>; + type RedbSecondaryTable = + redb::ReadOnlyTable<'txn, 
DatabaseInnerKeyValue, DatabaseInnerKeyValue>; + + type RedbTransaction<'db_bis> = redb::ReadTransaction<'db> where Self: 'db_bis; + + fn table_definitions(&self) -> &HashMap { + &self.table_definitions + } + + fn get_primary_table(&'txn self, model: &Model) -> Result { + let table_definition = self + .table_definitions() + .get(model.primary_key.unique_table_name.as_str()) + .ok_or_else(|| Error::TableDefinitionNotFound { + table: model.primary_key.unique_table_name.to_string(), + })?; + let table = self.redb_transaction.open_table(table_definition.redb)?; + Ok(table) + } + + fn get_secondary_table( + &'txn self, + model: &Model, + secondary_key: &DatabaseKeyDefinition, + ) -> Result { + let main_table_definition = self + .table_definitions() + .get(model.primary_key.unique_table_name.as_str()) + .ok_or_else(|| Error::TableDefinitionNotFound { + table: model.primary_key.unique_table_name.to_string(), + })?; + let secondary_table_definition = main_table_definition + .secondary_tables + .get(&secondary_key) + .ok_or_else(|| Error::TableDefinitionNotFound { + table: secondary_key.unique_table_name.to_string(), + })?; + let table = self + .redb_transaction + .open_table(secondary_table_definition.redb)?; + Ok(table) + } +} diff --git a/src/transaction/internal/rw_transaction.rs b/src/transaction/internal/rw_transaction.rs new file mode 100644 index 00000000..4a6a30ff --- /dev/null +++ b/src/transaction/internal/rw_transaction.rs @@ -0,0 +1,294 @@ +use crate::db_type::{ + DatabaseInnerKeyValue, DatabaseInput, DatabaseKeyDefinition, DatabaseKeyValue, + DatabaseOutputValue, DatabaseSecondaryKeyOptions, Error, Result, +}; +use crate::table_definition::PrimaryTableDefinition; +use crate::transaction::internal::private_readable_transaction::PrivateReadableTransaction; +use crate::watch::WatcherRequest; +use crate::{Input, Model}; +use redb::ReadableTable; +use redb::TableHandle; +use std::collections::{HashMap, HashSet}; +use std::fmt::Debug; + +pub struct 
InternalRwTransaction<'db> { + pub(crate) redb_transaction: redb::WriteTransaction<'db>, + pub(crate) primary_table_definitions: &'db HashMap>, +} + +impl<'db, 'txn> PrivateReadableTransaction<'db, 'txn> for InternalRwTransaction<'db> +where + Self: 'txn, + Self: 'db, +{ + type RedbPrimaryTable = redb::Table<'db, 'txn, DatabaseInnerKeyValue, &'static [u8]>; + type RedbSecondaryTable = redb::Table<'db, 'txn, DatabaseInnerKeyValue, DatabaseInnerKeyValue>; + + type RedbTransaction<'db_bis> = redb::WriteTransaction<'db> where Self: 'db_bis; + + fn table_definitions(&self) -> &HashMap { + &self.primary_table_definitions + } + + fn get_primary_table(&'txn self, model: &Model) -> Result { + let table_definition = self + .table_definitions() + .get(model.primary_key.unique_table_name.as_str()) + .ok_or_else(|| Error::TableDefinitionNotFound { + table: model.primary_key.unique_table_name.to_string(), + })?; + let table = self.redb_transaction.open_table(table_definition.redb)?; + Ok(table) + } + + fn get_secondary_table( + &'txn self, + model: &Model, + secondary_key: &DatabaseKeyDefinition, + ) -> Result { + let main_table_definition = self + .table_definitions() + .get(model.primary_key.unique_table_name.as_str()) + .ok_or_else(|| Error::TableDefinitionNotFound { + table: model.primary_key.unique_table_name.to_string(), + })?; + let secondary_table_definition = main_table_definition + .secondary_tables + .get(&secondary_key) + .ok_or_else(|| Error::TableDefinitionNotFound { + table: secondary_key.unique_table_name.to_string(), + })?; + let table = self + .redb_transaction + .open_table(secondary_table_definition.redb)?; + Ok(table) + } +} + +impl<'db> InternalRwTransaction<'db> { + pub(crate) fn commit(self) -> Result<()> { + self.redb_transaction.commit()?; + Ok(()) + } + + pub(crate) fn concrete_insert( + &self, + model: Model, + item: DatabaseInput, + ) -> Result<(WatcherRequest, DatabaseOutputValue)> { + let already_exists; + { + let mut table = 
self.get_primary_table(&model)?; + already_exists = table + .insert(&item.primary_key, item.value.as_slice())? + .is_some(); + } + + for (secondary_key_def, _value) in &item.secondary_keys { + let mut secondary_table = self.get_secondary_table(&model, secondary_key_def)?; + let result = match item.secondary_key_value(secondary_key_def)? { + DatabaseKeyValue::Default(value) => { + secondary_table.insert(value, &item.primary_key)? + } + DatabaseKeyValue::Optional(value) => { + if let Some(value) = value { + secondary_table.insert(value, &item.primary_key)? + } else { + None + } + } + }; + if result.is_some() && !already_exists { + return Err(Error::DuplicateKey { + key_name: secondary_key_def.unique_table_name.to_string(), + } + .into()); + } + } + + Ok(( + WatcherRequest::new( + model.primary_key.unique_table_name.clone(), + item.primary_key, + item.secondary_keys, + ), + DatabaseOutputValue(item.value), + )) + } + + pub(crate) fn concrete_remove( + &self, + model: Model, + item: DatabaseInput, + ) -> Result<(WatcherRequest, DatabaseOutputValue)> { + let keys = &item.secondary_keys; + { + let mut table = self.get_primary_table(&model)?; + table.remove(&item.primary_key)?; + } + + for (secondary_key_def, _value) in keys { + let mut secondary_table = self.get_secondary_table(&model, secondary_key_def)?; + match &item.secondary_key_value(secondary_key_def)? 
{ + DatabaseKeyValue::Default(value) => { + secondary_table.remove(value)?; + } + DatabaseKeyValue::Optional(value) => { + if let Some(value) = value { + secondary_table.remove(value)?; + } + } + } + } + + Ok(( + WatcherRequest::new( + model.primary_key.unique_table_name.clone(), + item.primary_key, + item.secondary_keys, + ), + DatabaseOutputValue(item.value), + )) + } + + pub(crate) fn concrete_update( + &self, + model: Model, + old_item: DatabaseInput, + updated_item: DatabaseInput, + ) -> Result<(WatcherRequest, DatabaseOutputValue, DatabaseOutputValue)> { + let (_, old_binary_value) = self.concrete_remove(model.clone(), old_item)?; + let (watcher_request, new_binary_value) = self.concrete_insert(model, updated_item)?; + Ok((watcher_request, old_binary_value, new_binary_value)) + } + + pub(crate) fn concrete_primary_drain<'a>( + &self, + model: Model, + ) -> Result> { + let mut items = vec![]; + let mut key_items = HashSet::new(); + + let mut primary_table = self.get_primary_table(&model)?; + // Drain primary table + let drain = primary_table.drain::(..)?; + for result in drain { + let (primary_key, value) = result?; + // TODO: we should delay to an drain scan + let binary_value = DatabaseOutputValue(value.value().to_vec()); + key_items.insert(primary_key.value().to_owned()); + items.push(binary_value); + } + + let secondary_table_names: Vec<&DatabaseKeyDefinition> = self + .primary_table_definitions + .get(model.primary_key.unique_table_name.as_str()) + .ok_or(Error::TableDefinitionNotFound { + table: model.primary_key.unique_table_name.to_string(), + })? 
+ .secondary_tables + .iter() + .map(|(key, _)| key) + .collect(); + + // Drain secondary tables + for secondary_table_name in secondary_table_names { + let mut secondary_table = self.get_secondary_table(&model, secondary_table_name)?; + + // Detect secondary keys to delete + let mut secondary_keys_to_delete = vec![]; + let mut number_detected_key_to_delete = key_items.len(); + for secondary_items in secondary_table.iter()? { + // Ta avoid to iter on all secondary keys if we have already detected all keys to delete + if number_detected_key_to_delete == 0 { + break; + } + let (secondary_key, primary_key) = secondary_items?; + if key_items.contains(&primary_key.value().to_owned()) { + // TODO remove owned + secondary_keys_to_delete.push(secondary_key.value().to_owned()); + number_detected_key_to_delete -= 1; + } + } + + // Delete secondary keys + for secondary_key in secondary_keys_to_delete { + secondary_table.remove(secondary_key)?; + } + } + + Ok(items) + } + + pub fn migrate(&self) -> Result<()> { + let new_table_definition = self + .primary_table_definitions + .get(T::native_db_model().primary_key.unique_table_name.as_str()) + .unwrap(); + + dbg!(&new_table_definition); + if new_table_definition + .native_model_options + .native_model_legacy + { + return Err(Error::MigrateLegacyModel( + T::native_db_model() + .primary_key + .unique_table_name + .to_string(), + )); + } + + // Check which table have the data + let mut old_table_definition = None; + for new_primary_table_definition in self.primary_table_definitions.values() { + // check if table exists, if the table does not exist continue + if self + .redb_transaction + .list_tables()? 
+ .find(|table| table.name() == new_primary_table_definition.redb.name()) + .is_none() + { + continue; + } + + let table = self + .redb_transaction + .open_table(new_primary_table_definition.redb.clone())?; + let len = table.len()?; + if len > 0 && old_table_definition.is_some() { + panic!( + "Impossible to migrate the table {} because the table {} has data", + T::native_db_model().primary_key.unique_table_name, + new_primary_table_definition.redb.name() + ); + } else if table.len()? > 0 { + old_table_definition = Some(new_primary_table_definition); + } + } + + let old_table_definition = if let Some(old_table_definition) = old_table_definition { + old_table_definition + } else { + // Nothing to migrate + return Ok(()); + }; + + // If the old table is the same as the new table, nothing to migrate + if old_table_definition.redb.name() + == T::native_db_model().primary_key.unique_table_name.as_str() + { + // Nothing to migrate + return Ok(()); + } + + // List all data from the old table + for old_data in self.concrete_primary_drain(old_table_definition.model.clone())? 
{ + let (decoded_item, _) = native_model::decode::(old_data.0).unwrap(); + let decoded_item = decoded_item.to_item(); + self.concrete_insert(T::native_db_model(), decoded_item)?; + } + + Ok(()) + } +} diff --git a/src/transaction/mod.rs b/src/transaction/mod.rs new file mode 100644 index 00000000..54ed07cb --- /dev/null +++ b/src/transaction/mod.rs @@ -0,0 +1,7 @@ +pub(crate) mod internal; +pub mod query; +mod r_transaction; +mod rw_transaction; + +pub use r_transaction::*; +pub use rw_transaction::*; diff --git a/src/transaction/query/drain.rs b/src/transaction/query/drain.rs new file mode 100644 index 00000000..e1ada37a --- /dev/null +++ b/src/transaction/query/drain.rs @@ -0,0 +1,21 @@ +use crate::db_type::{DatabaseSecondaryKeyOptions, Input, KeyDefinition, Result}; +use crate::transaction::internal::rw_transaction::InternalRwTransaction; + +pub struct RwDrain<'db, 'txn> { + pub(crate) internal: &'txn InternalRwTransaction<'db>, +} + +impl<'db, 'txn> RwDrain<'db, 'txn> { + pub fn primary(&self) -> Result> { + let model = T::native_db_model(); + let out = self.internal.concrete_primary_drain(model)?; + Ok(out.into_iter().map(|b| b.inner()).collect()) + } + + pub fn secondary( + &self, + _key_def: impl KeyDefinition, + ) -> () { + todo!() + } +} diff --git a/src/transaction/query/get.rs b/src/transaction/query/get.rs new file mode 100644 index 00000000..f55b4d0c --- /dev/null +++ b/src/transaction/query/get.rs @@ -0,0 +1,48 @@ +use crate::db_type::{DatabaseSecondaryKeyOptions, InnerKeyValue, Input, KeyDefinition, Result}; +use crate::transaction::internal::private_readable_transaction::PrivateReadableTransaction; +use crate::transaction::internal::r_transaction::InternalRTransaction; +use crate::transaction::internal::rw_transaction::InternalRwTransaction; + +pub struct RGet<'db, 'txn> { + pub(crate) internal: &'txn InternalRTransaction<'db>, +} + +impl RGet<'_, '_> { + pub fn primary(&self, key: impl InnerKeyValue) -> Result> { + let model = T::native_db_model(); 
+ let result = self.internal.get_by_primary_key(model, key)?; + Ok(result.map(|value| value.inner())) + } + + pub fn secondary( + &self, + key_def: impl KeyDefinition, + key: impl InnerKeyValue, + ) -> Result> { + let model = T::native_db_model(); + let result = self.internal.get_by_secondary_key(model, key_def, key)?; + Ok(result.map(|value| value.inner())) + } +} + +pub struct RwGet<'db, 'txn> { + pub(crate) internal: &'txn InternalRwTransaction<'db>, +} + +impl RwGet<'_, '_> { + pub fn primary(&self, key: impl InnerKeyValue) -> Result> { + let model = T::native_db_model(); + let result = self.internal.get_by_primary_key(model, key)?; + Ok(result.map(|value| value.inner())) + } + + pub fn secondary( + &self, + key_def: impl KeyDefinition, + key: impl InnerKeyValue, + ) -> Result> { + let model = T::native_db_model(); + let result = self.internal.get_by_secondary_key(model, key_def, key)?; + Ok(result.map(|value| value.inner())) + } +} diff --git a/src/transaction/query/len.rs b/src/transaction/query/len.rs new file mode 100644 index 00000000..a34a10e2 --- /dev/null +++ b/src/transaction/query/len.rs @@ -0,0 +1,42 @@ +use crate::db_type::{DatabaseSecondaryKeyOptions, Input, KeyDefinition, Result}; +use crate::transaction::internal::private_readable_transaction::PrivateReadableTransaction; +use crate::transaction::internal::r_transaction::InternalRTransaction; +use crate::transaction::internal::rw_transaction::InternalRwTransaction; + +pub struct RLen<'db, 'txn> { + pub(crate) internal: &'txn InternalRTransaction<'db>, +} + +impl RLen<'_, '_> { + pub fn primary(&self) -> Result { + let model = T::native_db_model(); + let result = self.internal.primary_len(model)?; + Ok(result) + } + + pub fn secondary( + &self, + _key_def: impl KeyDefinition, + ) -> Result> { + todo!() + } +} + +pub struct RwLen<'db, 'txn> { + pub(crate) internal: &'txn InternalRwTransaction<'db>, +} + +impl RwLen<'_, '_> { + pub fn primary(&self) -> Result { + let model = T::native_db_model(); + 
let result = self.internal.primary_len(model)?; + Ok(result) + } + + pub fn secondary( + &self, + _key_def: impl KeyDefinition, + ) -> Result> { + todo!() + } +} diff --git a/src/transaction/query/mod.rs b/src/transaction/query/mod.rs new file mode 100644 index 00000000..1161b819 --- /dev/null +++ b/src/transaction/query/mod.rs @@ -0,0 +1,9 @@ +mod drain; +mod get; +mod len; +mod scan; + +pub use drain::*; +pub use get::*; +pub use len::*; +pub use scan::*; diff --git a/src/transaction/query/scan/mod.rs b/src/transaction/query/scan/mod.rs new file mode 100644 index 00000000..130d6bd7 --- /dev/null +++ b/src/transaction/query/scan/mod.rs @@ -0,0 +1,79 @@ +mod primary_scan; +mod secondary_scan; + +use crate::db_type::{ + DatabaseInnerKeyValue, DatabaseSecondaryKeyOptions, Input, KeyDefinition, Result, +}; +pub use primary_scan::*; +pub use secondary_scan::*; + +use crate::transaction::internal::private_readable_transaction::PrivateReadableTransaction; +use crate::transaction::internal::r_transaction::InternalRTransaction; +use crate::transaction::internal::rw_transaction::InternalRwTransaction; + +pub struct RScan<'db, 'txn> { + pub(crate) internal: &'txn InternalRTransaction<'db>, +} + +impl<'txn> RScan<'_, 'txn> { + pub fn primary( + &self, + ) -> Result, T>> + { + let model = T::native_db_model(); + let table = self.internal.get_primary_table(&model)?; + let out = PrimaryScan::new(table); + Ok(out) + } + + pub fn secondary( + &self, + key_def: impl KeyDefinition, + ) -> Result< + SecondaryScan< + redb::ReadOnlyTable<'txn, DatabaseInnerKeyValue, &'static [u8]>, + redb::ReadOnlyTable<'txn, DatabaseInnerKeyValue, DatabaseInnerKeyValue>, + T, + >, + > { + let model = T::native_db_model(); + let primary_table = self.internal.get_primary_table(&model)?; + let secondary_key = key_def.database_key(); + let secondary_table = self.internal.get_secondary_table(&model, &secondary_key)?; + let out = SecondaryScan::new(primary_table, secondary_table); + Ok(out) + } +} + +pub 
struct RwScan<'db, 'txn> { + pub(crate) internal: &'txn InternalRwTransaction<'db>, +} + +impl<'db, 'txn> RwScan<'db, 'txn> { + pub fn primary( + &self, + ) -> Result, T>> { + let model = T::native_db_model(); + let table = self.internal.get_primary_table(&model)?; + let out = PrimaryScan::new(table); + Ok(out) + } + + pub fn secondary( + &self, + key_def: impl KeyDefinition, + ) -> Result< + SecondaryScan< + redb::Table<'db, 'txn, DatabaseInnerKeyValue, &'static [u8]>, + redb::Table<'db, 'txn, DatabaseInnerKeyValue, DatabaseInnerKeyValue>, + T, + >, + > { + let model = T::native_db_model(); + let primary_table = self.internal.get_primary_table(&model)?; + let secondary_key = key_def.database_key(); + let secondary_table = self.internal.get_secondary_table(&model, &secondary_key)?; + let out = SecondaryScan::new(primary_table, secondary_table); + Ok(out) + } +} diff --git a/src/transaction/query/scan/primary_scan.rs b/src/transaction/query/scan/primary_scan.rs new file mode 100644 index 00000000..d4f591e6 --- /dev/null +++ b/src/transaction/query/scan/primary_scan.rs @@ -0,0 +1,112 @@ +use crate::db_type::{unwrap_item, DatabaseInnerKeyValue, DatabaseInnerKeyValueRange, Input}; +use crate::InnerKeyValue; +use std::marker::PhantomData; +use std::ops::RangeBounds; + +pub struct PrimaryScan +where + PrimaryTable: redb::ReadableTable, +{ + pub(crate) primary_table: PrimaryTable, + pub(crate) _marker: PhantomData, +} + +impl PrimaryScan +where + PrimaryTable: redb::ReadableTable, +{ + pub fn new(table: PrimaryTable) -> Self { + Self { + primary_table: table, + _marker: PhantomData::default(), + } + } + + pub fn iter(&self) -> PrimaryScanIterator { + let range = self + .primary_table + .range::(..) 
+ .unwrap(); + PrimaryScanIterator { + range, + _marker: PhantomData::default(), + } + } + + // pub fn range>(&self, range: R) -> PrimaryScanIterator { + pub fn range>(&self, range: R) -> PrimaryScanIterator { + let database_inner_key_value_range = DatabaseInnerKeyValueRange::new(range); + let range = self + .primary_table + .range::(database_inner_key_value_range) + .unwrap(); + PrimaryScanIterator { + range, + _marker: PhantomData::default(), + } + } + + pub fn start_with<'a>( + &'a self, + start_with: impl InnerKeyValue + 'a, + ) -> PrimaryScanIteratorStartWith<'a, T> { + let start_with = start_with.database_inner_key_value(); + let range = self + .primary_table + .range::(start_with.clone()..) + .unwrap(); + PrimaryScanIteratorStartWith { + start_with, + range, + _marker: PhantomData::default(), + } + } +} + +pub struct PrimaryScanIterator<'a, T: Input> { + pub(crate) range: redb::Range<'a, DatabaseInnerKeyValue, &'static [u8]>, + pub(crate) _marker: PhantomData, +} + +impl<'a, T: Input> Iterator for PrimaryScanIterator<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + match self.range.next() { + Some(Ok((_, v))) => unwrap_item(Some(v)), + _ => None, + } + } +} +impl<'a, T: Input> DoubleEndedIterator for PrimaryScanIterator<'a, T> { + fn next_back(&mut self) -> Option { + match self.range.next_back() { + Some(Ok((_, v))) => unwrap_item(Some(v)), + _ => None, + } + } +} + +pub struct PrimaryScanIteratorStartWith<'a, T: Input> { + pub(crate) range: redb::Range<'a, DatabaseInnerKeyValue, &'static [u8]>, + pub(crate) start_with: DatabaseInnerKeyValue, + pub(crate) _marker: PhantomData, +} + +impl<'a, T: Input> Iterator for PrimaryScanIteratorStartWith<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + match self.range.next() { + Some(Ok((k, v))) => { + let k = k.value(); + if k.as_slice().starts_with(self.start_with.as_slice()) { + unwrap_item(Some(v)) + } else { + None + } + } + _ => None, + } + } +} diff --git 
a/src/transaction/query/scan/secondary_scan.rs b/src/transaction/query/scan/secondary_scan.rs new file mode 100644 index 00000000..b5d47471 --- /dev/null +++ b/src/transaction/query/scan/secondary_scan.rs @@ -0,0 +1,151 @@ +use crate::db_type::{unwrap_item, DatabaseInnerKeyValue, DatabaseInnerKeyValueRange, Input}; +use crate::InnerKeyValue; +use redb; +use std::marker::PhantomData; +use std::ops::RangeBounds; + +pub struct SecondaryScan +where + PrimaryTable: redb::ReadableTable, + SecondaryTable: redb::ReadableTable, +{ + pub(crate) primary_table: PrimaryTable, + pub(crate) secondary_table: SecondaryTable, + pub(crate) _marker: PhantomData, +} + +impl SecondaryScan +where + PrimaryTable: redb::ReadableTable, + SecondaryTable: redb::ReadableTable, +{ + pub fn new(primary_table: PrimaryTable, secondary_table: SecondaryTable) -> Self { + Self { + primary_table, + secondary_table, + _marker: PhantomData::default(), + } + } + + pub fn all(&self) -> SecondaryScanIterator { + let range = self + .secondary_table + .range::(..) + .unwrap(); + SecondaryScanIterator { + primary_table: &self.primary_table, + range, + _marker: PhantomData::default(), + } + } + + pub fn range>( + &self, + range: R, + ) -> SecondaryScanIterator { + let database_inner_key_value_range = DatabaseInnerKeyValueRange::new(range); + let range = self + .secondary_table + .range::(database_inner_key_value_range) + .unwrap(); + SecondaryScanIterator { + primary_table: &self.primary_table, + range, + _marker: PhantomData::default(), + } + } + + pub fn start_with<'a>( + &'a self, + start_with: impl InnerKeyValue + 'a, + ) -> SecondaryScanIteratorStartWith<'a, PrimaryTable, T> { + let start_with = start_with.database_inner_key_value(); + let range = self + .secondary_table + .range::(start_with.clone()..) 
+ .unwrap(); + SecondaryScanIteratorStartWith { + primary_table: &self.primary_table, + start_with, + range, + _marker: PhantomData::default(), + } + } +} + +pub struct SecondaryScanIterator<'a, PrimaryTable, T: Input> +where + PrimaryTable: redb::ReadableTable, +{ + pub(crate) primary_table: &'a PrimaryTable, + pub(crate) range: redb::Range<'a, DatabaseInnerKeyValue, DatabaseInnerKeyValue>, + pub(crate) _marker: PhantomData, +} + +impl<'a, PrimaryTable, T: Input> Iterator for SecondaryScanIterator<'a, PrimaryTable, T> +where + PrimaryTable: redb::ReadableTable, +{ + type Item = T; + + fn next(&mut self) -> Option { + match self.range.next() { + Some(Ok((_, key))) => { + if let Ok(value) = self.primary_table.get(key.value()) { + unwrap_item(value) + } else { + None + } + } + _ => None, + } + } +} + +impl<'a, PrimaryTable, T: Input> DoubleEndedIterator for SecondaryScanIterator<'a, PrimaryTable, T> +where + PrimaryTable: redb::ReadableTable, +{ + fn next_back(&mut self) -> Option { + match self.range.next_back() { + Some(Ok((_, key))) => unwrap_item(self.primary_table.get(key.value()).unwrap()), + _ => None, + } + } +} + +pub struct SecondaryScanIteratorStartWith<'a, PrimaryTable, T> +where + PrimaryTable: redb::ReadableTable, + T: Input, +{ + pub(crate) primary_table: &'a PrimaryTable, + pub(crate) start_with: DatabaseInnerKeyValue, + pub(crate) range: redb::Range<'a, DatabaseInnerKeyValue, DatabaseInnerKeyValue>, + pub(crate) _marker: PhantomData, +} + +impl<'a, PrimaryTable, T> Iterator for SecondaryScanIteratorStartWith<'a, PrimaryTable, T> +where + PrimaryTable: redb::ReadableTable, + T: Input, +{ + type Item = T; + + fn next(&mut self) -> Option { + match self.range.next() { + Some(Ok((secondary_key, primary_key))) => { + if secondary_key + .value() + .as_slice() + .starts_with(self.start_with.as_slice()) + { + unwrap_item(self.primary_table.get(primary_key.value()).unwrap()) + } else { + None + } + } + _ => None, + } + } +} diff --git 
a/src/transaction/r_transaction.rs b/src/transaction/r_transaction.rs new file mode 100644 index 00000000..f67fa2f3 --- /dev/null +++ b/src/transaction/r_transaction.rs @@ -0,0 +1,28 @@ +use crate::transaction::internal::r_transaction::InternalRTransaction; +use crate::transaction::query::RGet; +use crate::transaction::query::RLen; +use crate::transaction::query::RScan; + +pub struct RTransaction<'db> { + pub(crate) internal: InternalRTransaction<'db>, +} + +impl<'db> RTransaction<'db> { + pub fn get<'txn>(&'txn self) -> RGet<'db, 'txn> { + RGet { + internal: &self.internal, + } + } + + pub fn scan<'txn>(&'txn self) -> RScan<'db, 'txn> { + RScan { + internal: &self.internal, + } + } + + pub fn len<'txn>(&'txn self) -> RLen<'db, 'txn> { + RLen { + internal: &self.internal, + } + } +} diff --git a/src/transaction/rw_transaction.rs b/src/transaction/rw_transaction.rs new file mode 100644 index 00000000..339d6d45 --- /dev/null +++ b/src/transaction/rw_transaction.rs @@ -0,0 +1,174 @@ +use crate::db_type::{Input, Result}; +use crate::transaction::internal::rw_transaction::InternalRwTransaction; +use crate::transaction::query::RwDrain; +use crate::transaction::query::RwGet; +use crate::transaction::query::RwLen; +use crate::transaction::query::RwScan; +use crate::watch; +use crate::watch::Event; +use std::cell::RefCell; +use std::fmt::Debug; +use std::sync::{Arc, RwLock}; + +pub struct RwTransaction<'db> { + pub(crate) watcher: &'db Arc>, + pub(crate) batch: RefCell, + pub(crate) internal: InternalRwTransaction<'db>, +} + +impl<'db> RwTransaction<'db> { + pub fn get<'txn>(&'txn self) -> RwGet<'db, 'txn> { + RwGet { + internal: &self.internal, + } + } + + pub fn scan<'txn>(&'txn self) -> RwScan<'db, 'txn> { + RwScan { + internal: &self.internal, + } + } + + pub fn len<'txn>(&'txn self) -> RwLen<'db, 'txn> { + RwLen { + internal: &self.internal, + } + } + + pub fn drain<'txn>(&'txn self) -> RwDrain<'db, 'txn> { + RwDrain { + internal: &self.internal, + } + } +} + 
+impl<'db, 'txn> RwTransaction<'db> { + pub fn commit(self) -> Result<()> { + self.internal.commit()?; + // Send batch to watchers after commit succeeds + let batch = self.batch.into_inner(); + watch::push_batch(Arc::clone(&self.watcher), batch)?; + Ok(()) + } +} + +impl<'db, 'txn> RwTransaction<'db> { + pub fn insert(&self, item: T) -> Result<()> { + let (watcher_request, binary_value) = self + .internal + .concrete_insert(T::native_db_model(), item.to_item())?; + let event = Event::new_insert(binary_value); + self.batch.borrow_mut().add(watcher_request, event); + Ok(()) + } + + pub fn remove(&self, item: T) -> Result<()> { + let (watcher_request, binary_value) = self + .internal + .concrete_remove(T::native_db_model(), item.to_item())?; + let event = Event::new_delete(binary_value); + self.batch.borrow_mut().add(watcher_request, event); + Ok(()) + } + + pub fn update(&self, old_item: T, updated_item: T) -> Result<()> { + let (watcher_request, old_binary_value, new_binary_value) = self.internal.concrete_update( + T::native_db_model(), + old_item.to_item(), + updated_item.to_item(), + )?; + let event = Event::new_update(old_binary_value, new_binary_value); + self.batch.borrow_mut().add(watcher_request, event); + Ok(()) + } + + pub fn convert_all(&self) -> Result<()> + where + OldType: Input + Clone, + NewType: Input + From, + { + let find_all_old: Vec = self.scan().primary()?.iter().collect(); + for old in find_all_old { + let new: NewType = old.clone().into(); + self.internal + .concrete_insert(NewType::native_db_model(), new.to_item())?; + self.internal + .concrete_remove(OldType::native_db_model(), old.to_item())?; + } + Ok(()) + } + + /// Automatically migrate the data from the old schema to the new schema. **No matter the state of the database**, + /// if all models remain defined in the application as they are, the data will be migrated to the most recent version automatically. 
+ /// + /// Native DB uses the [`native_model`](https://crates.io/crates/native_model) identifier `id` to identify the model and `version` to identify the version of the model. + /// We can define a model with the same identifier `id` but with a different version `version`. + /// + /// In the example below we define one model with the identifier `id=1` with two versions `version=1` and `version=2`. + /// - You **must** link the previous version from the new one with the `from` option, like `#[native_model(id=1, version=2, from=LegacyData)]`. + /// - You **must** define the interoperability between the two versions by implementing `From for Data` and `From for LegacyData`, or by implementing `TryFrom for Data` and `TryFrom for LegacyData`. + /// - You **must** define all models (by calling [`define`](#method.define)) before calling [`migration`](#method.migrate). + /// - You **must** use the most recent/highest version as the target version when you call [`migration`](#method.migrate): `migration::()`. + /// That means you can't call `migration::()` because `LegacyData` has version `1` and `Data` has version `2`. + /// + /// After calling `migration::()`, all data of the model `LegacyData` will be migrated to the model `Data`. + /// + /// Under the hood, when you call [`migration`](#method.migrate), `native_model` is used to convert the data from the old model to the new model + /// using the `From` or `TryFrom` implementation for each version up to the target version defined when you call [`migration::()`](#method.migrate). + /// + /// It's advisable to perform all migrations within a **single transaction** to ensure that all migrations are successfully completed. 
+ /// + /// # Example + /// ```rust + /// use native_db::*; + /// use native_model::{native_model, Model}; + /// use serde::{Deserialize, Serialize}; + /// + /// #[derive(Serialize, Deserialize, Debug)] + /// #[native_model(id=1, version=1)] + /// #[native_db] + /// struct LegacyData { + /// #[primary_key] + /// id: u32, + /// } + /// + /// impl From for LegacyData { + /// fn from(data: Data) -> Self { + /// LegacyData { + /// id: data.id as u32, + /// } + /// } + /// } + /// + /// #[derive(Serialize, Deserialize, Debug)] + /// #[native_model(id=1, version=2, from=LegacyData)] + /// #[native_db] + /// struct Data { + /// #[primary_key] + /// id: u64, + /// } + /// + /// impl From for Data { + /// fn from(legacy_data: LegacyData) -> Self { + /// Data { + /// id: legacy_data.id as u64, + /// } + /// } + /// } + /// + /// fn main() -> Result<(), db_type::Error> { + /// let mut builder = DatabaseBuilder::new(); + /// builder.define::()?; + /// builder.define::()?; + /// let db = builder.create_in_memory()?; + /// + /// let rw = db.rw_transaction()?; + /// rw.migrate::()?; + /// // Other migrations if needed.. 
+ /// rw.commit() + /// } + /// ``` + pub fn migrate(&self) -> Result<()> { + self.internal.migrate::() + } +} diff --git a/src/watch/batch.rs b/src/watch/batch.rs index 3dcf613e..d04f3ea5 100644 --- a/src/watch/batch.rs +++ b/src/watch/batch.rs @@ -14,7 +14,7 @@ impl Batch { } } -impl Iterator for Batch { +impl<'a> Iterator for Batch { type Item = (WatcherRequest, Event); fn next(&mut self) -> Option { @@ -26,12 +26,7 @@ impl Debug for Batch { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "[")?; for (watcher_request, event) in &self.0 { - write!( - f, - "({}, {:?}), ", - String::from_utf8_lossy(&watcher_request.primary_key), - event - )?; + write!(f, "({:?}, {:?}), ", watcher_request.primary_key, event)?; } write!(f, "]") } diff --git a/src/watch/event.rs b/src/watch/event.rs index bcb2e8bd..abe04534 100644 --- a/src/watch/event.rs +++ b/src/watch/event.rs @@ -1,4 +1,4 @@ -use crate::{BinaryValue, SDBItem}; +use crate::db_type::{DatabaseOutputValue, Input}; use std::fmt::Debug; #[derive(Clone)] @@ -9,18 +9,21 @@ pub enum Event { } impl Event { - pub(crate) fn new_insert(value: BinaryValue) -> Self { + pub(crate) fn new_insert(value: DatabaseOutputValue) -> Self { Self::Insert(Insert(value)) } - pub(crate) fn new_update(old_value: BinaryValue, new_value: BinaryValue) -> Self { + pub(crate) fn new_update( + old_value: DatabaseOutputValue, + new_value: DatabaseOutputValue, + ) -> Self { Self::Update(Update { old: old_value, new: new_value, }) } - pub(crate) fn new_delete(value: BinaryValue) -> Self { + pub(crate) fn new_delete(value: DatabaseOutputValue) -> Self { Self::Delete(Delete(value)) } } @@ -36,34 +39,34 @@ impl Debug for Event { } #[derive(Clone)] -pub struct Insert(pub(crate) BinaryValue); +pub struct Insert(pub(crate) DatabaseOutputValue); impl Insert { - pub fn inner(&self) -> T { + pub fn inner(&self) -> T { self.0.inner() } } #[derive(Clone)] pub struct Update { - pub(crate) old: BinaryValue, - pub(crate) new: 
BinaryValue, + pub(crate) old: DatabaseOutputValue, + pub(crate) new: DatabaseOutputValue, } impl Update { - pub fn inner_old(&self) -> T { + pub fn inner_old(&self) -> T { self.old.inner() } - pub fn inner_new(&self) -> T { + pub fn inner_new(&self) -> T { self.new.inner() } } #[derive(Clone)] -pub struct Delete(pub(crate) BinaryValue); +pub struct Delete(pub(crate) DatabaseOutputValue); impl Delete { - pub fn inner(&self) -> T { + pub fn inner(&self) -> T { self.0.inner() } } diff --git a/src/watch/filter.rs b/src/watch/filter.rs index c0555964..4571343d 100644 --- a/src/watch/filter.rs +++ b/src/watch/filter.rs @@ -1,59 +1,64 @@ -use crate::KeyDefinition; +use crate::db_type::{ + DatabaseInnerKeyValue, DatabaseKeyDefinition, DatabaseSecondaryKeyOptions, KeyDefinition, +}; #[derive(Eq, PartialEq, Clone)] pub(crate) struct TableFilter { - pub(crate) table_name: &'static [u8], + pub(crate) table_name: String, pub(crate) key_filter: KeyFilter, } #[derive(Eq, PartialEq, Clone)] pub(crate) enum KeyFilter { - Primary(Option>), - PrimaryStartWith(Vec), - Secondary(Vec, Option>), - SecondaryStartWith(Vec, Vec), + Primary(Option), + PrimaryStartWith(DatabaseInnerKeyValue), + Secondary( + DatabaseKeyDefinition, + Option, + ), + SecondaryStartWith( + DatabaseKeyDefinition, + DatabaseInnerKeyValue, + ), } impl TableFilter { - pub(crate) fn new_primary(table_name: &'static [u8], key: Option<&[u8]>) -> Self { + pub(crate) fn new_primary(table_name: String, key: Option) -> Self { Self { table_name, - key_filter: KeyFilter::Primary(key.map(|k| k.to_vec())), + key_filter: KeyFilter::Primary(key.map(|k| k.to_owned())), } } - pub(crate) fn new_primary_start_with(table_name: &'static [u8], key_prefix: &[u8]) -> Self { + pub(crate) fn new_primary_start_with( + table_name: String, + key_prefix: DatabaseInnerKeyValue, + ) -> Self { Self { table_name, - key_filter: KeyFilter::PrimaryStartWith(key_prefix.to_vec()), + key_filter: KeyFilter::PrimaryStartWith(key_prefix.to_owned()), } } - 
pub(crate) fn new_secondary( - table_name: &'static [u8], - key_def: K, - key: Option<&[u8]>, + pub(crate) fn new_secondary>( + table_name: String, + key_def: &K, + key: Option, ) -> Self { Self { table_name, - key_filter: KeyFilter::Secondary( - key_def.secondary_table_name().as_bytes().to_vec(), - key.map(|v| v.to_vec()), - ), + key_filter: KeyFilter::Secondary(key_def.database_key(), key.map(|k| k.to_owned())), } } - pub(crate) fn new_secondary_start_with( - table_name: &'static [u8], - key: K, - key_prefix: &[u8], + pub(crate) fn new_secondary_start_with>( + table_name: String, + key: &K, + key_prefix: DatabaseInnerKeyValue, ) -> Self { Self { table_name, - key_filter: KeyFilter::SecondaryStartWith( - key.secondary_table_name().as_bytes().to_vec(), - key_prefix.to_vec(), - ), + key_filter: KeyFilter::SecondaryStartWith(key.database_key(), key_prefix.to_owned()), } } } diff --git a/src/watch/mod.rs b/src/watch/mod.rs index a8f1a5c0..7a6e7ec6 100644 --- a/src/watch/mod.rs +++ b/src/watch/mod.rs @@ -1,6 +1,7 @@ mod batch; mod event; mod filter; +pub mod query; mod request; mod sender; @@ -16,9 +17,11 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum WatchEventError { #[error("TryLockErrorPoisoned")] - TryLockErrorPoisoned(Batch), + // TryLockErrorPoisoned(Batch<'a>), // TODO: remove 'a lifetime from Batch Error + TryLockErrorPoisoned, #[error("TryLockErrorWouldBlock")] - TryLockErrorWouldBlock(Batch), + // TryLockErrorWouldBlock(Batch<'a>), // TODO: remove 'a lifetime from Batch Error + TryLockErrorWouldBlock, #[cfg(not(feature = "tokio"))] #[error("SendError")] SendError(#[from] std::sync::mpsc::SendError), @@ -42,8 +45,8 @@ pub(crate) fn push_batch( batch: Batch, ) -> Result<(), WatchEventError> { let watchers = senders.try_read().map_err(|err| match err { - TryLockError::Poisoned(_) => WatchEventError::TryLockErrorPoisoned(batch.clone()), - TryLockError::WouldBlock => WatchEventError::TryLockErrorWouldBlock(batch.clone()), + TryLockError::Poisoned(_) => 
WatchEventError::TryLockErrorPoisoned, + TryLockError::WouldBlock => WatchEventError::TryLockErrorWouldBlock, })?; for (watcher_request, event) in batch { diff --git a/src/watch/query/get.rs b/src/watch/query/get.rs new file mode 100644 index 00000000..cc1398ac --- /dev/null +++ b/src/watch/query/get.rs @@ -0,0 +1,25 @@ +use crate::db_type::{DatabaseSecondaryKeyOptions, InnerKeyValue, Input, KeyDefinition, Result}; +use crate::watch; +use crate::watch::query::internal; +use crate::watch::MpscReceiver; + +pub struct WatchGet<'db, 'w> { + pub(crate) internal: &'w internal::InternalWatch<'db>, +} + +impl WatchGet<'_, '_> { + pub fn primary( + &self, + key: impl InnerKeyValue, + ) -> Result<(MpscReceiver, u64)> { + self.internal.watch_primary::(key) + } + + pub fn secondary( + &self, + key_def: impl KeyDefinition, + key: impl InnerKeyValue, + ) -> Result<(MpscReceiver, u64)> { + self.internal.watch_secondary::(&key_def, key) + } +} diff --git a/src/watch/query/internal.rs b/src/watch/query/internal.rs new file mode 100644 index 00000000..54ff596a --- /dev/null +++ b/src/watch/query/internal.rs @@ -0,0 +1,106 @@ +use crate::db_type::{ + DatabaseSecondaryKeyOptions, Error, InnerKeyValue, Input, KeyDefinition, Result, +}; +use crate::watch; +use crate::watch::{MpscReceiver, TableFilter}; +use std::sync::atomic::AtomicU64; +use std::sync::{Arc, Mutex, RwLock}; + +pub(crate) struct InternalWatch<'db> { + pub(crate) watchers: &'db Arc>, + pub(crate) watchers_counter_id: &'db AtomicU64, +} + +impl InternalWatch<'_> { + fn watch_generic( + &self, + table_filter: watch::TableFilter, + ) -> Result<(MpscReceiver, u64)> { + #[cfg(not(feature = "tokio"))] + let (event_sender, event_receiver) = std::sync::mpsc::channel(); + #[cfg(feature = "tokio")] + let (event_sender, event_receiver) = tokio::sync::mpsc::unbounded_channel(); + let event_sender = Arc::new(Mutex::new(event_sender)); + let id = self.generate_watcher_id()?; + let mut watchers = self.watchers.write().unwrap(); + 
watchers.add_sender(id, &table_filter, Arc::clone(&event_sender)); + drop(watchers); + Ok((event_receiver, id)) + } + + fn generate_watcher_id(&self) -> Result { + let value = self + .watchers_counter_id + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + if value == u64::MAX { + Err(Error::MaxWatcherReached.into()) + } else { + Ok(value) + } + } + + pub(crate) fn watch_primary( + &self, + key: impl InnerKeyValue, + ) -> Result<(MpscReceiver, u64)> { + let table_name = T::native_db_model().primary_key; + let key = key.database_inner_key_value(); + let table_filter = + TableFilter::new_primary(table_name.unique_table_name.clone(), Some(key)); + self.watch_generic(table_filter) + } + + pub(crate) fn watch_primary_all(&self) -> Result<(MpscReceiver, u64)> { + let table_name = T::native_db_model().primary_key; + let table_filter = TableFilter::new_primary(table_name.unique_table_name.clone(), None); + self.watch_generic(table_filter) + } + + pub(crate) fn watch_primary_start_with( + &self, + start_with: impl InnerKeyValue, + ) -> Result<(MpscReceiver, u64)> { + let table_name = T::native_db_model().primary_key; + let start_with = start_with.database_inner_key_value(); + let table_filter = + TableFilter::new_primary_start_with(table_name.unique_table_name.clone(), start_with); + self.watch_generic(table_filter) + } + + pub(crate) fn watch_secondary( + &self, + key_def: &impl KeyDefinition, + key: impl InnerKeyValue, + ) -> Result<(MpscReceiver, u64)> { + let table_name = T::native_db_model().primary_key; + let key = key.database_inner_key_value(); + let table_filter = + TableFilter::new_secondary(table_name.unique_table_name.clone(), key_def, Some(key)); + self.watch_generic(table_filter) + } + + pub(crate) fn watch_secondary_all( + &self, + key_def: &impl KeyDefinition, + ) -> Result<(MpscReceiver, u64)> { + let table_name = T::native_db_model().primary_key; + let table_filter = + TableFilter::new_secondary(table_name.unique_table_name.clone(), key_def, None); + 
self.watch_generic(table_filter) + } + + pub(crate) fn watch_secondary_start_with( + &self, + key_def: &impl KeyDefinition, + start_with: impl InnerKeyValue, + ) -> Result<(MpscReceiver, u64)> { + let table_name = T::native_db_model().primary_key; + let start_with = start_with.database_inner_key_value(); + let table_filter = TableFilter::new_secondary_start_with( + table_name.unique_table_name.clone(), + key_def, + start_with, + ); + self.watch_generic(table_filter) + } +} diff --git a/src/watch/query/mod.rs b/src/watch/query/mod.rs new file mode 100644 index 00000000..2d1d8159 --- /dev/null +++ b/src/watch/query/mod.rs @@ -0,0 +1,25 @@ +mod get; +mod internal; +mod scan; + +pub use get::*; +pub(crate) use internal::*; +pub use scan::*; + +pub struct Watch<'db> { + pub(crate) internal: InternalWatch<'db>, +} + +impl<'db> Watch<'db> { + pub fn get<'w>(&'w self) -> WatchGet<'db, 'w> { + WatchGet { + internal: &self.internal, + } + } + + pub fn scan<'w>(&'w self) -> WatchScan<'db, 'w> { + WatchScan { + internal: &self.internal, + } + } +} diff --git a/src/watch/query/scan.rs b/src/watch/query/scan.rs new file mode 100644 index 00000000..f94359c3 --- /dev/null +++ b/src/watch/query/scan.rs @@ -0,0 +1,78 @@ +use crate::db_type::{ + DatabaseKeyDefinition, DatabaseSecondaryKeyOptions, InnerKeyValue, Input, KeyDefinition, Result, +}; +use crate::watch; +use crate::watch::query::internal; +use crate::watch::MpscReceiver; +use std::ops::RangeBounds; + +pub struct WatchScan<'db, 'w> { + pub(crate) internal: &'w internal::InternalWatch<'db>, +} + +impl WatchScan<'_, '_> { + pub fn primary(&self) -> WatchScanPrimary { + WatchScanPrimary { + internal: &self.internal, + } + } + + pub fn secondary( + &self, + key_def: impl KeyDefinition, + ) -> WatchScanSecondary { + WatchScanSecondary { + key_def: key_def.database_key(), + internal: &self.internal, + } + } +} + +pub struct WatchScanPrimary<'db, 'w> { + pub(crate) internal: &'w internal::InternalWatch<'db>, +} + +impl 
WatchScanPrimary<'_, '_> { + pub fn all(&self) -> Result<(MpscReceiver, u64)> { + self.internal.watch_primary_all::() + } + pub fn range<'a>( + &self, + _range: impl RangeBounds<&'a [u8]> + 'a, + ) -> Result<(MpscReceiver, u64)> { + todo!() + } + + pub fn start_with( + &self, + start_with: impl InnerKeyValue, + ) -> Result<(MpscReceiver, u64)> { + self.internal.watch_primary_start_with::(start_with) + } +} + +pub struct WatchScanSecondary<'db, 'w> { + pub(crate) key_def: DatabaseKeyDefinition, + pub(crate) internal: &'w internal::InternalWatch<'db>, +} + +impl WatchScanSecondary<'_, '_> { + pub fn all<'ws, T: Input>(&'ws self) -> Result<(MpscReceiver, u64)> { + self.internal.watch_secondary_all::(&self.key_def) + } + + pub fn range<'a, 'ws>( + &'ws self, + _range: impl RangeBounds<&'a [u8]> + 'a, + ) -> Result<(MpscReceiver, u64)> { + todo!() + } + + pub fn start_with( + &self, + start_with: impl InnerKeyValue, + ) -> Result<(MpscReceiver, u64)> { + self.internal + .watch_secondary_start_with::(&self.key_def, start_with) + } +} diff --git a/src/watch/request.rs b/src/watch/request.rs index 234aff31..7ac84837 100644 --- a/src/watch/request.rs +++ b/src/watch/request.rs @@ -1,20 +1,28 @@ +use crate::db_type::{ + DatabaseInnerKeyValue, DatabaseKeyDefinition, DatabaseKeyValue, DatabaseSecondaryKeyOptions, +}; use std::collections::HashMap; #[derive(Clone)] pub struct WatcherRequest { - pub(crate) table_name: &'static [u8], - pub(crate) primary_key: Vec, - pub(crate) secondary_keys_value: HashMap<&'static str, Vec>, + // TODO: Maybe replace table_name by DatabaseKeyDefinition<()> or other + pub(crate) table_name: String, + pub(crate) primary_key: DatabaseInnerKeyValue, + pub(crate) secondary_keys_value: + HashMap, DatabaseKeyValue>, } impl WatcherRequest { pub fn new( - table_name: &'static str, - primary_key: Vec, - secondary_keys: HashMap<&'static str, Vec>, + table_name: String, + primary_key: DatabaseInnerKeyValue, + secondary_keys: HashMap< + DatabaseKeyDefinition, 
+ DatabaseKeyValue, + >, ) -> Self { Self { - table_name: table_name.as_bytes(), + table_name, primary_key, secondary_keys_value: secondary_keys, } diff --git a/src/watch/sender.rs b/src/watch/sender.rs index e9c67dc2..545199ad 100644 --- a/src/watch/sender.rs +++ b/src/watch/sender.rs @@ -1,3 +1,4 @@ +use crate::db_type::DatabaseKeyValue; use crate::watch::filter::{KeyFilter, TableFilter}; use crate::watch::request::WatcherRequest; use crate::watch::{Event, MpscSender}; @@ -42,7 +43,11 @@ impl Watchers { } } KeyFilter::PrimaryStartWith(key_prefix) => { - if request.primary_key.starts_with(key_prefix) { + if request + .primary_key + .as_slice() + .starts_with(key_prefix.as_slice()) + { event_senders.push(Arc::clone(event_sender)); } } @@ -50,10 +55,21 @@ impl Watchers { for (request_secondary_key_def, request_secondary_key) in &request.secondary_keys_value { - if key_def == request_secondary_key_def.as_bytes() { - if let Some(value) = &key { - if value == request_secondary_key { - event_senders.push(Arc::clone(event_sender)); + if key_def == request_secondary_key_def { + if let Some(filter_value) = &key { + match request_secondary_key { + DatabaseKeyValue::Default(value) => { + if value == filter_value { + event_senders.push(Arc::clone(event_sender)); + } + } + DatabaseKeyValue::Optional(value) => { + if let Some(value) = value { + if value == filter_value { + event_senders.push(Arc::clone(event_sender)); + } + } + } } } else { event_senders.push(Arc::clone(event_sender)); @@ -65,9 +81,22 @@ impl Watchers { for (request_secondary_key_def, request_secondary_key) in &request.secondary_keys_value { - if key_def == request_secondary_key_def.as_bytes() { - if request_secondary_key.starts_with(key_prefix) { - event_senders.push(Arc::clone(event_sender)); + match request_secondary_key { + DatabaseKeyValue::Default(value) => { + if key_def == request_secondary_key_def { + if value.as_slice().starts_with(key_prefix.as_slice()) { + 
event_senders.push(Arc::clone(event_sender)); + } + } + } + DatabaseKeyValue::Optional(value) => { + if let Some(value) = value { + if key_def == request_secondary_key_def { + if value.as_slice().starts_with(key_prefix.as_slice()) { + event_senders.push(Arc::clone(event_sender)); + } + } + } } } } diff --git a/struct_db_macro/README.md b/struct_db_macro/README.md deleted file mode 100644 index 2e1175df..00000000 --- a/struct_db_macro/README.md +++ /dev/null @@ -1 +0,0 @@ -A procedural macro for struct_db \ No newline at end of file diff --git a/struct_db_macro/src/lib.rs b/struct_db_macro/src/lib.rs deleted file mode 100644 index 89b73a90..00000000 --- a/struct_db_macro/src/lib.rs +++ /dev/null @@ -1,14 +0,0 @@ -extern crate proc_macro; - -mod model_attributes; -mod model_struct_db; -mod struct_db; - -use proc_macro::TokenStream; - -use struct_db::struct_db as struct_db_impl; - -#[proc_macro_attribute] -pub fn struct_db(args: TokenStream, input: TokenStream) -> TokenStream { - struct_db_impl(args, input) -} diff --git a/struct_db_macro/src/model_attributes.rs b/struct_db_macro/src/model_attributes.rs deleted file mode 100644 index 4d79a95e..00000000 --- a/struct_db_macro/src/model_attributes.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::collections::HashSet; -use syn::meta::ParseNestedMeta; -use syn::parse::Result; -use syn::Ident; - -#[derive(Default, Clone)] -pub(crate) struct ModelAttributes { - pk_function_name: Option, // Primary Key Function Name - gk_function_names: HashSet, // Generic Secondary Key Function Names // gk ou gsk - // TODO: Derived Secondary Key Function Names: dk ou dsk -} - -impl ModelAttributes { - pub(crate) fn pk(&self) -> Ident { - self.pk_function_name.clone().expect("pk is required") - } - - pub(crate) fn pk_name(&self) -> String { - self.pk().to_string().to_lowercase() - } - - pub(crate) fn gk_function_names(&self) -> HashSet { - self.gk_function_names.clone() - } - - pub(crate) fn parse(&mut self, meta: ParseNestedMeta) -> Result<()> { 
- if meta.path.is_ident("pk") { - self.pk_function_name = Some(meta.value()?.parse()?); - } else if meta.path.is_ident("gk") { - self.gk_function_names.insert(meta.value()?.parse()?); - } else { - panic!( - "Unknown attribute: {}", - meta.path.get_ident().unwrap().to_string() - ); - } - Ok(()) - } -} diff --git a/struct_db_macro/src/model_struct_db.rs b/struct_db_macro/src/model_struct_db.rs deleted file mode 100644 index 8af37189..00000000 --- a/struct_db_macro/src/model_struct_db.rs +++ /dev/null @@ -1,131 +0,0 @@ -use crate::model_attributes::ModelAttributes; -use proc_macro::Span; -use quote::quote; -use syn::Ident; - -pub(crate) struct ModelStructDB { - struct_name: Ident, - attrs: ModelAttributes, -} - -impl ModelStructDB { - pub fn new(struct_name: Ident, attrs: ModelAttributes) -> Self { - Self { struct_name, attrs } - } - - pub fn table_name(&self) -> String { - self.struct_name().to_lowercase() - } - - pub fn struct_name(&self) -> String { - self.struct_name.clone().to_string() - } - - pub(crate) fn secondary_key_function_names(&self) -> Vec<(Ident, Ident)> { - let table_name = self.table_name(); - self.attrs - .gk_function_names() - .iter() - .map(|secondary_key_function_name| { - let secondary_key_function_name = - secondary_key_function_name.to_string().to_lowercase(); - let secondary_table_name = - format!("{}_{}", table_name, secondary_key_function_name); - ( - Ident::new(&secondary_key_function_name, Span::call_site().into()), - Ident::new(&secondary_table_name, Span::call_site().into()), - ) - }) - .collect::>() - } - - pub(crate) fn struct_db_gks(&self) -> proc_macro2::TokenStream { - let tokens = self - .secondary_key_function_names() - .iter() - .map(|secondary_key_function| { - let (gk_name, secondary_table) = secondary_key_function.clone(); - let secondary_table = secondary_table.to_string(); - quote! { - secondary_tables_name.insert(#secondary_table, self.#gk_name()); - } - }) - .collect::>(); - - quote! 
{ - fn struct_db_gks(&self) -> std::collections::HashMap<&'static str, Vec> { - let mut secondary_tables_name = std::collections::HashMap::new(); - #(#tokens)* - secondary_tables_name - } - } - } - - pub(crate) fn struct_db_pk(&self) -> proc_macro2::TokenStream { - let primary_key_function_name = self.attrs.pk(); - quote! { - fn struct_db_pk(&self) -> Vec { - self.#primary_key_function_name() - } - } - } - - pub(crate) fn struct_db_schema(&self) -> proc_macro2::TokenStream { - let table_name = self.table_name(); - let primary_key_name = self.attrs.pk_name(); - let insert_tokens = self - .secondary_key_function_names() - .iter() - .map(|gk| { - let (_, secondary_table) = gk.clone(); - let secondary_table = secondary_table.to_string(); - quote! { - secondary_tables_name.insert(#secondary_table); - } - }) - .collect::>(); - - quote! { - fn struct_db_schema() -> struct_db::Schema { - let mut secondary_tables_name = std::collections::HashSet::new(); - #(#insert_tokens)* - struct_db::Schema { - table_name: #table_name, - primary_key: #primary_key_name, - secondary_tables_name: secondary_tables_name, - } - } - } - } - - pub(crate) fn keys_enum_name(&self) -> Ident { - let struct_name = self.struct_name(); - Ident::new(&format!("{}Key", struct_name), Span::call_site().into()) - } - - pub(crate) fn keys_enum(&self) -> Vec { - self.secondary_key_function_names() - .iter() - .map(|gk| { - let (gk_name, _) = gk.clone(); - quote! { - #gk_name - } - }) - .collect::>() - } - - pub(crate) fn keys_enum_fn_secondary_table_name(&self) -> Vec { - let keys_enum_name_token = self.keys_enum_name(); - self.secondary_key_function_names() - .iter() - .map(|gk| { - let (fn_key_name, secondary_table) = gk.clone(); - let secondary_table = secondary_table.to_string(); - quote! 
{ - #keys_enum_name_token::#fn_key_name => #secondary_table, - } - }) - .collect::>() - } -} diff --git a/struct_db_macro/src/struct_db.rs b/struct_db_macro/src/struct_db.rs deleted file mode 100644 index 3c6bd1f0..00000000 --- a/struct_db_macro/src/struct_db.rs +++ /dev/null @@ -1,57 +0,0 @@ -use crate::model_attributes::ModelAttributes; -use crate::model_struct_db::ModelStructDB; -use proc_macro::TokenStream; -use quote::quote; -use syn::{parse_macro_input, DeriveInput}; - -pub fn struct_db(args: TokenStream, input: TokenStream) -> TokenStream { - let ast = parse_macro_input!(input as DeriveInput); - let struct_name = &ast.ident; - - let mut attrs = ModelAttributes::default(); - let model_attributes_parser = syn::meta::parser(|meta| attrs.parse(meta)); - parse_macro_input!(args with model_attributes_parser); - let model_struct_db = ModelStructDB::new(struct_name.clone(), attrs.clone()); - - let struct_db_pk = model_struct_db.struct_db_pk(); - let struct_db_gks = model_struct_db.struct_db_gks(); - let struct_db_schema = model_struct_db.struct_db_schema(); - - let keys_enum_name = model_struct_db.keys_enum_name(); - let keys_enum = model_struct_db.keys_enum(); - let keys_enum_fn_secondary_table_name = model_struct_db.keys_enum_fn_secondary_table_name(); - - let gen = quote! 
{ - #ast - - impl struct_db::SDBItem for #struct_name { - fn struct_db_bincode_encode_to_vec(&self) -> Vec { - struct_db::bincode_encode_to_vec(self).expect("Failed to serialize the struct #struct_name") - } - - fn struct_db_bincode_decode_from_slice(slice: &[u8]) -> Self { - struct_db::bincode_decode_from_slice(slice).expect("Failed to deserialize the struct #struct_name").0 - } - - #struct_db_schema - #struct_db_pk - #struct_db_gks - } - - /// Index selection Enum for [#struct_name] - pub(crate) enum #keys_enum_name { - #(#keys_enum),* - } - - impl struct_db::KeyDefinition for #keys_enum_name { - fn secondary_table_name(&self) -> &'static str { - match self { - #(#keys_enum_fn_secondary_table_name)* - _ => panic!("Unknown key"), - } - } - } - }; - - gen.into() -} diff --git a/tests/00_bincode.rs b/tests/00_bincode.rs deleted file mode 100644 index cf78cdd0..00000000 --- a/tests/00_bincode.rs +++ /dev/null @@ -1,49 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use struct_db::*; -use struct_db_macro::struct_db; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] -#[struct_db(pk = generate_my_primary_key)] -struct Item { - id: u32, - name: String, -} - -impl Item { - #[allow(dead_code)] - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() - } -} - -#[test] -fn my_item_bincode_encode_to_vec() { - let my_item = Item { - id: 1, - name: "test".to_string(), - }; - - let encoded = my_item.struct_db_bincode_encode_to_vec(); - let decoded: (Item, _) = - bincode::serde::decode_from_slice(encoded.as_slice(), bincode::config::standard()).unwrap(); - - assert_eq!(my_item, decoded.0); -} - -#[test] -fn my_item_bincode_decode_from_slice() { - tests::init(); - - let my_item = Item { - id: 1, - name: "test".to_string(), - }; - - let encoded = my_item.struct_db_bincode_encode_to_vec(); - let decoded: Item = Item::struct_db_bincode_decode_from_slice(encoded.as_slice()); - - 
assert_eq!(my_item, decoded); -} diff --git a/tests/01_fn_primary_key.rs b/tests/01_fn_primary_key.rs deleted file mode 100644 index 8b137891..00000000 --- a/tests/01_fn_primary_key.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/tests/02_simple_insert.rs b/tests/02_simple_insert.rs deleted file mode 100644 index bbdfa4c8..00000000 --- a/tests/02_simple_insert.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] -#[struct_db(pk = generate_my_primary_key)] -struct Item { - id: u32, - name: String, -} - -impl Item { - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() - } -} - -#[test] -fn test_insert_my_item() { - let tf = tests::init(); - - let item = Item { - id: 1, - name: "test".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - txn.tables().insert(&txn, item).unwrap(); -} diff --git a/tests/03_simple_insert_get.rs b/tests/03_simple_insert_get.rs deleted file mode 100644 index 7df0d7bf..00000000 --- a/tests/03_simple_insert_get.rs +++ /dev/null @@ -1,107 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] -#[struct_db(pk = generate_my_primary_key)] -struct Item { - id: u32, - name: String, -} - -impl Item { - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() - } -} - -#[test] -fn test_insert_get_my_item() { - let tf = tests::init(); - - let item = Item { - id: 1, - name: "test".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item).unwrap(); - } - 
txn.commit().unwrap(); - - { - let txn_read = db.read_transaction().unwrap(); - let result: Item = txn_read - .tables() - .primary_get(&txn_read, b"1-test") - .unwrap() - .unwrap(); - assert_eq!(result.id, 1); - } -} - -#[test] -fn test_insert_get_my_item_write_txn() { - let tf = tests::init(); - - let item = Item { - id: 1, - name: "test".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item).unwrap(); - } - txn.commit().unwrap(); - - let txn_read = db.read_transaction().unwrap(); - { - let mut tables = txn_read.tables(); - let result: Item = tables.primary_get(&txn_read, b"1-test").unwrap().unwrap(); - assert_eq!(result.id, 1); - assert_eq!(result.name, "test"); - } -} - -#[test] -fn test_insert_get_my_item_readonly_txn() { - let tf = tests::init(); - - let item = Item { - id: 1, - name: "test".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item).unwrap(); - } - txn.commit().unwrap(); - - let txn_read = db.read_transaction().unwrap(); - { - let mut tables = txn_read.tables(); - let result: Item = tables.primary_get(&txn_read, b"1-test").unwrap().unwrap(); - assert_eq!(result.id, 1); - assert_eq!(result.name, "test"); - } -} diff --git a/tests/04_simple_len.rs b/tests/04_simple_len.rs deleted file mode 100644 index e4181af7..00000000 --- a/tests/04_simple_len.rs +++ /dev/null @@ -1,87 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = generate_my_primary_key)] -struct Item { - id: u32, - name: String, -} - -impl Item { - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, 
self.name).into() - } -} - -#[test] -fn test_simple_len() { - let tf = tests::init(); - - let mut item = Item { - id: 1, - name: "test".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item.clone()).unwrap(); - } - txn.commit().unwrap(); - - { - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); - assert_eq!(len, 1); - } - - item.id = 2; - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item.clone()).unwrap(); - } - txn.commit().unwrap(); - - { - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); - assert_eq!(len, 2); - } -} - -#[test] -fn test_simple_len_txn_write() { - let tf = tests::init(); - - let item = Item { - id: 1, - name: "test".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item.clone()).unwrap(); - } - txn.commit().unwrap(); - - let txn_read = db.read_transaction().unwrap(); - { - let mut tables = txn_read.tables(); - let len = tables.len::(&txn_read).unwrap(); - assert_eq!(len, 1); - } -} diff --git a/tests/05_update.rs b/tests/05_update.rs deleted file mode 100644 index ab7b1879..00000000 --- a/tests/05_update.rs +++ /dev/null @@ -1,153 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key)] -struct Item(u32); - -impl Item { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } -} - -#[test] -fn update() { - let tf = tests::init(); - - let o_v1 = Item(1); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - 
- db.define::(); - - // Insert the item - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, o_v1.clone()).unwrap(); - } - tx.commit().unwrap(); - - // Check if the item is in the database - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap().unwrap(); - assert_eq!(o_v1, o2); - } - - let o_v2 = Item(2); - - // Update the item - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.update(&tx, o_v1.clone(), o_v2.clone()).unwrap(); - } - tx.commit().unwrap(); - - // Check if the item v1 is not in the database - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap(); - assert_eq!(o2, None); - } - // Check if the item v2 is in the database - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item = tables.primary_get(&tx_r, &o_v2.p_key()).unwrap().unwrap(); - assert_eq!(o_v2, o2); - } -} - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key, gk = s_key)] -struct Item1K(u32, String); - -impl Item1K { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } - - pub fn s_key(&self) -> Vec { - self.1.as_bytes().to_vec() - } -} - -#[test] -fn update_1k() { - let tf = tests::init(); - - let o_v1 = Item1K(1, "1".to_string()); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - // Insert the item - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, o_v1.clone()).unwrap(); - } - tx.commit().unwrap(); - - // Check if the item is in the database by primary key - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item1K = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap().unwrap(); - assert_eq!(o_v1, o2); - } - // Check if 
the item is in the database by secondary key - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item1K = tables - .secondary_get(&tx_r, Item1KKey::s_key, &o_v1.s_key()) - .unwrap() - .unwrap(); - assert_eq!(o_v1, o2); - } - - let o_v2 = Item1K(2, "2".to_string()); - - // Update the item - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.update(&tx, o_v1.clone(), o_v2.clone()).unwrap(); - } - tx.commit().unwrap(); - - // Check if the item v1 is not in the database by primary key - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables.primary_get(&tx_r, &o_v1.p_key()).unwrap(); - assert_eq!(o2, None); - } - // Check if the item v1 is not in the database by secondary key - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables - .secondary_get(&tx_r, Item1KKey::s_key, &o_v1.s_key()) - .unwrap(); - assert_eq!(o2, None); - } - - // Check if the item v2 is in the database - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Item1K = tables.primary_get(&tx_r, &o_v2.p_key()).unwrap().unwrap(); - assert_eq!(o_v2, o2); - } -} diff --git a/tests/06_transaction.rs b/tests/06_transaction.rs deleted file mode 100644 index b43a72d3..00000000 --- a/tests/06_transaction.rs +++ /dev/null @@ -1,160 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use std::panic::AssertUnwindSafe; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = generate_my_primary_key)] -struct Item { - id: u32, - name: String, -} - -impl Item { - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() - } -} - -#[test] -fn test_transaction_obj_1() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::(); - 
- let item = Item { - id: 1, - name: "test".to_string(), - }; - { - let tx_write = db.transaction().unwrap(); - { - let mut tables = tx_write.tables(); - tables.insert(&tx_write, item).unwrap(); - // Random fail here... - } - tx_write.commit().unwrap(); - } - - let txn_read = db.read_transaction().unwrap(); - let result: Item = txn_read - .tables() - .primary_get(&txn_read, b"1-test") - .unwrap() - .unwrap(); - assert_eq!(result.id, 1); -} - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = generate_my_primary_key)] -struct Item2 { - id: u32, - name: String, -} - -impl Item2 { - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() - } -} - -#[test] -fn test_transaction_obj_1_and_obj_2() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::(); - db.define::(); - - let item_1 = Item { - id: 1, - name: "test".to_string(), - }; - let item_2 = Item2 { - id: 2, - name: "test".to_string(), - }; - - { - let tx_write = db.transaction().unwrap(); - { - let mut tables = tx_write.tables(); - tables.insert(&tx_write, item_1).unwrap(); - tables.insert(&tx_write, item_2).unwrap(); - } - tx_write.commit().unwrap(); - } - - let txn_read = db.read_transaction().unwrap(); - let result: Item = txn_read - .tables() - .primary_get(&txn_read, b"1-test") - .unwrap() - .unwrap(); - assert_eq!(result.id, 1); - let result: Item2 = txn_read - .tables() - .primary_get(&txn_read, b"2-test") - .unwrap() - .unwrap(); - assert_eq!(result.id, 2); -} - -#[allow(unreachable_code)] -#[test] -fn test_transaction_fail() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::(); - - let item_1 = Item { - id: 1, - name: "test".to_string(), - }; - { - let tx_write = db.transaction().unwrap(); - { - let mut tables = tx_write.tables(); - tables.insert(&tx_write, item_1).unwrap(); - // Random fail here... 
- } - tx_write.commit().unwrap(); - } - { - let txn_read = db.read_transaction().unwrap(); - let result: Item = txn_read - .tables() - .primary_get(&txn_read, b"1-test") - .unwrap() - .unwrap(); - assert_eq!(result.id, 1); - } - - let item_2 = Item { - id: 2, - name: "test".to_string(), - }; - let result = std::panic::catch_unwind(AssertUnwindSafe(|| { - let tx_write = db.transaction().unwrap(); - { - let mut tables = tx_write.tables(); - tables.insert(&tx_write, item_2).unwrap(); - panic!("Random panic here...") - } - - tx_write.commit().unwrap(); - })); - - assert!(result.is_err()); - - let txn_read = db.read_transaction().unwrap(); - let result = txn_read - .tables() - .primary_get::(&txn_read, b"2-test") - .unwrap(); - assert!(result.is_none()); -} diff --git a/tests/07_simple_multithreads.rs b/tests/07_simple_multithreads.rs deleted file mode 100644 index 35388f84..00000000 --- a/tests/07_simple_multithreads.rs +++ /dev/null @@ -1,84 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use std::sync::Arc; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = generate_my_primary_key)] -struct Item { - id: u32, - name: String, -} - -impl Item { - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() - } -} - -#[test] -fn multi_threads() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::(); - - let db = Arc::new(db); - - let db_thread_1 = db.clone(); - let handle_thread_1 = std::thread::spawn(move || { - let item_a = Item { - id: 1, - name: "a".to_string(), - }; - { - let tx_write = db_thread_1.transaction().unwrap(); - { - let mut tables = tx_write.tables(); - tables.insert(&tx_write, item_a).unwrap(); - } - tx_write.commit().unwrap(); - } - }); - - let db_thread_2 = db.clone(); - let handle_thread_2 = std::thread::spawn(move || { - let item_b = Item { - id: 1, - name: 
"b".to_string(), - }; - { - let tx_write = db_thread_2.transaction().unwrap(); - { - let mut tables = tx_write.tables(); - tables.insert(&tx_write, item_b).unwrap(); - } - tx_write.commit().unwrap(); - } - }); - - handle_thread_1.join().unwrap(); - handle_thread_2.join().unwrap(); - - { - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); - assert_eq!(len, 2); - - let item_a = txn_read - .tables() - .primary_get::(&txn_read, b"1-a") - .unwrap() - .unwrap(); - assert_eq!(item_a.name, "a".to_string()); - - let item_b = txn_read - .tables() - .primary_get::(&txn_read, b"1-b") - .unwrap() - .unwrap(); - assert_eq!(item_b.name, "b".to_string()); - } -} diff --git a/tests/08_fn_key.rs b/tests/08_fn_key.rs deleted file mode 100644 index 7513e9e7..00000000 --- a/tests/08_fn_key.rs +++ /dev/null @@ -1,212 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use std::collections::HashSet; -use std::iter::FromIterator; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db( - pk = generate_my_primary_key, - gk = secondary_key_1, - gk = secondary_key_2 -)] -struct Item { - id: u32, - name: String, -} - -impl Item { - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() - } - pub fn secondary_key_1(&self) -> Vec { - format!("{}", self.id).into() - } - pub fn secondary_key_2(&self) -> Vec { - format!("{}", self.name).into() - } -} - -#[test] -fn test_gk() { - let item = Item { - id: 1, - name: "test".to_string(), - }; - let db_keys = item.struct_db_gks(); - assert_eq!(db_keys.len(), 2); - - assert_eq!(db_keys.get("item_secondary_key_1").unwrap(), b"1"); - assert_eq!(db_keys.get("item_secondary_key_2").unwrap(), b"test"); -} - -#[test] -fn test_init_table() { - let init_table = Item::struct_db_schema(); - assert_eq!(init_table.table_name, "item"); - assert_eq!(init_table.primary_key, 
"generate_my_primary_key"); - assert_eq!( - init_table.secondary_tables_name, - HashSet::from_iter(vec!["item_secondary_key_1", "item_secondary_key_2"].into_iter()) - ); -} - -#[test] -fn test_struct_db_gks() { - let secondary_table_name_1 = ItemKey::secondary_key_1.secondary_table_name(); - assert_eq!(secondary_table_name_1, "item_secondary_key_1"); - let secondary_table_name_2 = ItemKey::secondary_key_2.secondary_table_name(); - assert_eq!(secondary_table_name_2, "item_secondary_key_2"); -} - -#[test] -fn test_insert_duplicate_key() { - let tf = tests::init(); - - let item_1 = Item { - id: 1, - name: "test".to_string(), - }; - - let item_2 = Item { - id: 2, - name: "test".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - let mut tables = txn.tables(); - tables.insert(&txn, item_1).unwrap(); - let result = tables.insert(&txn, item_2); - assert!(result.is_err()); - assert!(matches!(result.unwrap_err(), Error::DuplicateKey { .. 
})); -} - -#[test] -fn test_insert_and_get_on_transaction() { - let tf = tests::init(); - - let item_1 = Item { - id: 1, - name: "test".to_string(), - }; - - let item_2 = Item { - id: 2, - name: "test2".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item_1).unwrap(); - tables.insert(&txn, item_2).unwrap(); - } - txn.commit().unwrap(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - let result: Item = tables - .secondary_get(&txn, ItemKey::secondary_key_1, b"1") - .unwrap() - .unwrap(); - assert_eq!(result.name, "test"); - let result: Item = tables - .secondary_get(&txn, ItemKey::secondary_key_2, b"test2") - .unwrap() - .unwrap(); - assert_eq!(result.id, 2); - } -} - -#[test] -fn test_insert_and_get_on_readonly_transaction() { - let tf = tests::init(); - - let item_1 = Item { - id: 1, - name: "test".to_string(), - }; - - let item_2 = Item { - id: 2, - name: "test2".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item_1).unwrap(); - tables.insert(&txn, item_2).unwrap(); - } - txn.commit().unwrap(); - - let txn_read = db.read_transaction().unwrap(); - { - let mut tables = txn_read.tables(); - let result: Item = tables - .secondary_get(&txn_read, ItemKey::secondary_key_1, b"1") - .unwrap() - .unwrap(); - assert_eq!(result.name, "test"); - let result: Item = tables - .secondary_get(&txn_read, ItemKey::secondary_key_2, b"test2") - .unwrap() - .unwrap(); - assert_eq!(result.id, 2); - } -} - -#[test] -fn test_insert_and_get() { - let tf = tests::init(); - - let item_1 = Item { - id: 1, - name: "test".to_string(), - }; - - let item_2 = Item { - id: 2, - name: "test2".to_string(), - }; - - let mut db = 
Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item_1).unwrap(); - tables.insert(&txn, item_2).unwrap(); - } - txn.commit().unwrap(); - - let txn_read = db.read_transaction().unwrap(); - let mut tables = txn_read.tables(); - let result: Item = tables - .secondary_get(&txn_read, ItemKey::secondary_key_1, b"1") - .unwrap() - .unwrap(); - assert_eq!(result.name, "test"); - let result: Item = tables - .secondary_get(&txn_read, ItemKey::secondary_key_2, b"test2") - .unwrap() - .unwrap(); - assert_eq!(result.id, 2); -} diff --git a/tests/09_iterator.rs b/tests/09_iterator.rs deleted file mode 100644 index b8218846..00000000 --- a/tests/09_iterator.rs +++ /dev/null @@ -1,694 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db( - pk = generate_my_primary_key, - gk = secondary_key_1, - gk = secondary_key_2 -)] -struct Item { - id: u32, - name: String, -} - -impl Item { - pub fn new(id: u32, name: &str) -> Self { - Self { - id, - name: name.to_string(), - } - } - pub fn generate_my_primary_key(&self) -> Vec { - self.id.to_be_bytes().to_vec() - } - pub fn secondary_key_1(&self) -> Vec { - format!("{}", self.id).into() - } - pub fn secondary_key_2(&self) -> Vec { - format!("{}", self.name).into() - } -} - -#[test] -fn test_iter() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, Item::new(1, "test")).unwrap(); - tables.insert(&txn, Item::new(2, "test2")).unwrap(); - } - txn.commit().unwrap(); - - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let result: Vec = tables.primary_iter(&txn).unwrap().collect(); - 
assert_eq!(result.len(), 2); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 1); - assert_eq!(obj1.name, "test"); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.id, 2); - assert_eq!(obj2.name, "test2"); - } -} - -// Check if the use of BigEndian is correct -#[test] -fn test_iter_many_items_to_be_bytes() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - // Insert 1000 items - for i in 0..257 { - tables - .insert(&txn, Item::new(i, format!("test_{}", i).as_str())) - .unwrap(); - } - } - txn.commit().unwrap(); - - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let iter: Vec = tables.primary_iter(&txn).unwrap().collect(); - assert_eq!(iter.len(), 257); - - let obj1 = iter.get(0).unwrap(); - assert_eq!(obj1.id, 0); - assert_eq!(obj1.name, "test_0"); - - let obj2 = iter.get(1).unwrap(); - assert_eq!(obj2.id, 1); - assert_eq!(obj2.name, "test_1"); - - let obj3 = iter.get(256).unwrap(); - assert_eq!(obj3.id, 256); - assert_eq!(obj3.name, "test_256"); - } -} - -#[test] -fn test_double_ended_iter() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, Item::new(1, "test")).unwrap(); - tables.insert(&txn, Item::new(2, "test2")).unwrap(); - } - txn.commit().unwrap(); - - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let iter = tables.primary_iter(&txn).unwrap(); - let result: Vec = iter.rev().collect(); - - assert_eq!(result.len(), 2); - - let obj1 = result.get(0).unwrap(); - - assert_eq!(obj1.id, 2); - assert_eq!(obj1.name, "test2"); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.id, 1); - assert_eq!(obj2.name, "test"); - } -} - -#[test] -fn test_iter_range() { - let tf = 
tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, Item::new(1, "test")).unwrap(); - tables.insert(&txn, Item::new(2, "test2")).unwrap(); - tables.insert(&txn, Item::new(3, "test3")).unwrap(); - } - txn.commit().unwrap(); - - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let result: Vec = tables - .primary_iter_range(&txn, ..2_i32.to_be_bytes().as_slice()) - .unwrap() - .collect(); - assert_eq!(result.len(), 1); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 1); - assert_eq!(obj1.name, "test"); - - let result: Vec = tables - .primary_iter_range(&txn, 2_i32.to_be_bytes().as_slice()..) - .unwrap() - .collect(); - assert_eq!(result.len(), 2); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 2); - assert_eq!(obj1.name, "test2"); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.id, 3); - assert_eq!(obj2.name, "test3"); - - let result: Vec = tables - .primary_iter_range( - &txn, - 2_i32.to_be_bytes().as_slice()..3_i32.to_be_bytes().as_slice(), - ) - .unwrap() - .collect(); - assert_eq!(result.len(), 1); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 2); - assert_eq!(obj1.name, "test2"); - } -} - -#[test] -fn test_iter_by_key() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, Item::new(1, "test")).unwrap(); - tables.insert(&txn, Item::new(2, "test2")).unwrap(); - } - txn.commit().unwrap(); - - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let result: Vec = tables - .secondary_iter(&txn, ItemKey::secondary_key_1) - .unwrap() - .collect(); - - assert_eq!(result.len(), 2); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 1); - 
assert_eq!(obj1.name, "test"); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.id, 2); - assert_eq!(obj2.name, "test2"); - } -} - -#[test] -fn test_double_ended_iter_by_key() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, Item::new(1, "test")).unwrap(); - tables.insert(&txn, Item::new(2, "test2")).unwrap(); - } - txn.commit().unwrap(); - - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let iter = tables - .secondary_iter(&txn, ItemKey::secondary_key_1) - .unwrap(); - let result: Vec = iter.rev().collect(); - - assert_eq!(result.len(), 2); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 2); - assert_eq!(obj1.name, "test2"); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.id, 1); - assert_eq!(obj2.name, "test"); - } -} - -#[test] -fn test_double_ended_iter_by_key_range() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, Item::new(1, "test")).unwrap(); - tables.insert(&txn, Item::new(2, "test2")).unwrap(); - tables.insert(&txn, Item::new(3, "test3")).unwrap(); - } - txn.commit().unwrap(); - - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let iter = tables - .secondary_iter_range(&txn, ItemKey::secondary_key_1, ..b"2".as_slice()) - .unwrap(); - let result: Vec = iter.rev().collect(); - - assert_eq!(result.len(), 1); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 1); - assert_eq!(obj1.name, "test"); - - let iter = tables - .secondary_iter_range(&txn, ItemKey::secondary_key_1, b"2".as_slice()..) 
- .unwrap(); - let result: Vec = iter.rev().collect(); - - assert_eq!(result.len(), 2); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 3); - assert_eq!(obj1.name, "test3"); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.id, 2); - assert_eq!(obj2.name, "test2"); - - let iter = tables - .secondary_iter_range( - &txn, - ItemKey::secondary_key_1, - b"2".as_slice()..b"3".as_slice(), - ) - .unwrap(); - let result: Vec = iter.rev().collect(); - - assert_eq!(result.len(), 1); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 2); - assert_eq!(obj1.name, "test2"); - } -} - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = generate_my_primary_key)] -struct ItemFlag { - name: String, -} - -impl ItemFlag { - pub fn new(name: &str) -> Self { - Self { - name: name.to_string(), - } - } - - pub fn generate_my_primary_key(&self) -> Vec { - self.name.clone().into() - } -} - -#[test] -fn test_start_with_scenario() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - // Red flag - tables.insert(&txn, ItemFlag::new("red:1")).unwrap(); - tables.insert(&txn, ItemFlag::new("red:2")).unwrap(); - tables.insert(&txn, ItemFlag::new("red:3")).unwrap(); - // Blue flag - tables.insert(&txn, ItemFlag::new("blue:1")).unwrap(); - tables.insert(&txn, ItemFlag::new("blue:2")).unwrap(); - tables.insert(&txn, ItemFlag::new("blue:3")).unwrap(); - // Green flag - tables.insert(&txn, ItemFlag::new("green:1")).unwrap(); - tables.insert(&txn, ItemFlag::new("green:2")).unwrap(); - tables.insert(&txn, ItemFlag::new("green:3")).unwrap(); - } - txn.commit().unwrap(); - - let prefix = [ - b"red:".as_slice(), - b"blue:".as_slice(), - b"green:".as_slice(), - ]; - for p in prefix.iter() { - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let iter = 
tables.primary_iter_start_with(&txn, p).unwrap(); - let result: Vec = iter.collect(); - assert_eq!(result.len(), 3); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.name, format!("{}1", std::str::from_utf8(p).unwrap())); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.name, format!("{}2", std::str::from_utf8(p).unwrap())); - - let obj3 = result.get(2).unwrap(); - assert_eq!(obj3.name, format!("{}3", std::str::from_utf8(p).unwrap())); - } - } -} - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = generate_my_primary_key, gk = flag)] -struct ItemIdFlag { - id: String, - flag: String, -} - -impl ItemIdFlag { - pub fn new(id: &str, name: &str) -> Self { - Self { - id: id.to_string(), - flag: name.to_string(), - } - } - - pub fn generate_my_primary_key(&self) -> Vec { - self.id.clone().into() - } - pub fn flag(&self) -> Vec { - format!("{}:{}", self.flag, self.id).into() - } -} - -#[test] -fn test_start_with_by_key_scenario_write_txn() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - // Red flag - tables.insert(&txn, ItemIdFlag::new("1", "red")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("2", "red")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("3", "red")).unwrap(); - // Blue flag - tables.insert(&txn, ItemIdFlag::new("4", "blue")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("5", "blue")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("6", "blue")).unwrap(); - // Green flag - tables.insert(&txn, ItemIdFlag::new("7", "green")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("8", "green")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("9", "green")).unwrap(); - } - txn.commit().unwrap(); - - let prefix = [ - b"red:".as_slice(), - b"blue:".as_slice(), - b"green:".as_slice(), - ]; - for p in prefix.iter() { - let txn = db.transaction().unwrap(); - { - let mut 
tables = txn.tables(); - let iter = tables - .secondary_iter_start_with(&txn, ItemIdFlagKey::flag, p) - .unwrap(); - let result: Vec = iter.collect(); - assert_eq!(result.len(), 3); - - let obj1 = result.get(0).unwrap(); - assert_eq!( - format!("{}:", obj1.flag), - format!("{}", std::str::from_utf8(p).unwrap()) - ); - - let obj2 = result.get(1).unwrap(); - assert_eq!( - format!("{}:", obj2.flag), - format!("{}", std::str::from_utf8(p).unwrap()) - ); - - let obj3 = result.get(2).unwrap(); - assert_eq!( - format!("{}:", obj3.flag), - format!("{}", std::str::from_utf8(p).unwrap()) - ); - } - } -} - -#[test] -fn test_start_with_by_key_scenario_readonly_txn() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - // Red flag - tables.insert(&txn, ItemIdFlag::new("1", "red")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("2", "red")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("3", "red")).unwrap(); - // Blue flag - tables.insert(&txn, ItemIdFlag::new("4", "blue")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("5", "blue")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("6", "blue")).unwrap(); - // Green flag - tables.insert(&txn, ItemIdFlag::new("7", "green")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("8", "green")).unwrap(); - tables.insert(&txn, ItemIdFlag::new("9", "green")).unwrap(); - } - txn.commit().unwrap(); - - let prefix = [ - b"red:".as_slice(), - b"blue:".as_slice(), - b"green:".as_slice(), - ]; - for p in prefix.iter() { - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let iter = tables - .secondary_iter_start_with(&txn, ItemIdFlagKey::flag, p) - .unwrap(); - let result: Vec = iter.collect(); - assert_eq!(result.len(), 3); - - let obj1 = result.get(0).unwrap(); - assert_eq!( - format!("{}:", obj1.flag), - format!("{}", std::str::from_utf8(p).unwrap()) - ); - - let obj2 = 
result.get(1).unwrap(); - assert_eq!( - format!("{}:", obj2.flag), - format!("{}", std::str::from_utf8(p).unwrap()) - ); - - let obj3 = result.get(2).unwrap(); - assert_eq!( - format!("{}:", obj3.flag), - format!("{}", std::str::from_utf8(p).unwrap()) - ); - } - } -} - -#[test] -fn test_txn_write_iter() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, Item::new(1, "test")).unwrap(); - tables.insert(&txn, Item::new(2, "test2")).unwrap(); - } - txn.commit().unwrap(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - let result: Vec = tables.primary_iter(&txn).unwrap().collect(); - assert_eq!(result.len(), 2); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 1); - assert_eq!(obj1.name, "test"); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.id, 2); - assert_eq!(obj2.name, "test2"); - } -} - -#[test] -fn test_txn_write_iter_range() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, Item::new(1, "test")).unwrap(); - tables.insert(&txn, Item::new(2, "test2")).unwrap(); - tables.insert(&txn, Item::new(3, "test3")).unwrap(); - } - txn.commit().unwrap(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - let result: Vec = tables - .primary_iter_range(&txn, ..2_i32.to_be_bytes().as_slice()) - .unwrap() - .collect(); - assert_eq!(result.len(), 1); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 1); - assert_eq!(obj1.name, "test"); - - let result: Vec = tables - .primary_iter_range(&txn, 2_i32.to_be_bytes().as_slice()..) 
- .unwrap() - .collect(); - assert_eq!(result.len(), 2); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 2); - assert_eq!(obj1.name, "test2"); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.id, 3); - assert_eq!(obj2.name, "test3"); - - let result: Vec = tables - .primary_iter_range( - &txn, - 2_i32.to_be_bytes().as_slice()..3_i32.to_be_bytes().as_slice(), - ) - .unwrap() - .collect(); - assert_eq!(result.len(), 1); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.id, 2); - assert_eq!(obj1.name, "test2"); - } -} - -#[test] -fn test_txn_write_start_with_scenario() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - // Red flag - tables.insert(&txn, ItemFlag::new("red:1")).unwrap(); - tables.insert(&txn, ItemFlag::new("red:2")).unwrap(); - tables.insert(&txn, ItemFlag::new("red:3")).unwrap(); - // Blue flag - tables.insert(&txn, ItemFlag::new("blue:1")).unwrap(); - tables.insert(&txn, ItemFlag::new("blue:2")).unwrap(); - tables.insert(&txn, ItemFlag::new("blue:3")).unwrap(); - // Green flag - tables.insert(&txn, ItemFlag::new("green:1")).unwrap(); - tables.insert(&txn, ItemFlag::new("green:2")).unwrap(); - tables.insert(&txn, ItemFlag::new("green:3")).unwrap(); - } - txn.commit().unwrap(); - - let prefix = [ - b"red:".as_slice(), - b"blue:".as_slice(), - b"green:".as_slice(), - ]; - for p in prefix.iter() { - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - let iter = tables.primary_iter_start_with(&txn, p).unwrap(); - let result: Vec = iter.collect(); - assert_eq!(result.len(), 3); - - let obj1 = result.get(0).unwrap(); - assert_eq!(obj1.name, format!("{}1", std::str::from_utf8(p).unwrap())); - - let obj2 = result.get(1).unwrap(); - assert_eq!(obj2.name, format!("{}2", std::str::from_utf8(p).unwrap())); - - let obj3 = result.get(2).unwrap(); - assert_eq!(obj3.name, 
format!("{}3", std::str::from_utf8(p).unwrap())); - } - } -} diff --git a/tests/10_remove.rs b/tests/10_remove.rs deleted file mode 100644 index 9dcc2006..00000000 --- a/tests/10_remove.rs +++ /dev/null @@ -1,58 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key)] -struct O(u32); - -impl O { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } -} - -#[test] -fn remove() { - let tf = tests::init(); - - let o = O(1); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - // Insert the item - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, o.clone()).unwrap(); - } - tx.commit().unwrap(); - - // Check if the item is in the database - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: O = tables.primary_get(&tx_r, &o.p_key()).unwrap().unwrap(); - assert_eq!(o, o2); - } - - // Remove the item - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.remove(&tx, o.clone()).unwrap(); - } - tx.commit().unwrap(); - - // Check if the item is not in the database - let tx_r = db.read_transaction().unwrap(); - { - let mut tables = tx_r.tables(); - let o2: Option = tables.primary_get(&tx_r, &o.p_key()).unwrap(); - assert_eq!(o2, None); - } -} diff --git a/tests/11_watch.rs b/tests/11_watch.rs deleted file mode 100644 index 52a4b802..00000000 --- a/tests/11_watch.rs +++ /dev/null @@ -1,462 +0,0 @@ -#![cfg(not(feature = "native_model"))] -#![cfg(not(feature = "tokio"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use std::sync::Arc; -use std::time::Duration; -use struct_db::watch::Event; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key)] -struct A(u32); - -impl A { - pub fn p_key(&self) -> Vec { - 
self.0.to_be_bytes().to_vec() - } -} - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key)] -struct B(u32); - -impl B { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } -} - -const TIMEOUT: Duration = Duration::from_secs(1); - -#[test] -fn watch_one_primary_key() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let a = A(1); - - let (recv, _) = db.primary_watch::(Some(&a.p_key())).unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..1 { - let inner_event: A = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert_eq!(inner_event, a); - } - assert!(recv.try_recv().is_err()); -} - -#[test] -fn watch_all_primary_key() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let a1 = A(1); - let a2 = A(2); - - let (recv, _) = db.primary_watch::(None).unwrap(); - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a1.clone()).unwrap(); - tables.insert(&tx, a2.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..2 { - let inner_event: A = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert!(inner_event == a1 || inner_event == a2); - } - assert!(recv.try_recv().is_err()); -} - -#[test] -#[ignore] -fn watch_multithreading() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let db = Arc::new(db); - let dba = Arc::clone(&db); - - let a = A(1); - let (recv, _) = dba.primary_watch::(Some(&a.p_key())).unwrap(); - - let handle = std::thread::spawn(move || { - let a = A(1); - let (recv, _) = 
dba.primary_watch::(Some(&a.p_key())).unwrap(); - let tx = dba.transaction().unwrap(); - { - let a = A(1); - let mut tables = tx.tables(); - tables.insert(&tx, a.clone()).unwrap(); - } - tx.commit().unwrap(); - for _ in 0..1 { - let inner_event: A = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert_eq!(inner_event, a); - } - }); - - let dbb = Arc::clone(&db); - let tx = dbb.transaction().unwrap(); - { - let a = A(1); - let mut tables = tx.tables(); - tables.insert(&tx, a.clone()).unwrap(); - } - tx.commit().unwrap(); - - handle.join().unwrap(); - for _ in 0..2 { - let inner_event: A = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert_eq!(inner_event, a); - } - assert!(recv.try_recv().is_err()); -} - -#[test] -fn watch_outside() { - let tf = tests::init(); - - let a = A(1); - let b1 = B(1); - let b2 = B(2); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - db.define::(); - - let (recv, _) = db.primary_watch::(Some(&a.p_key())).unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a.clone()).unwrap(); - tables.insert(&tx, b1.clone()).unwrap(); - tables.insert(&tx, b2.clone()).unwrap(); - } - tx.commit().unwrap(); - - // Check that recv receives only 1 insert event - let inner_event: B = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert_eq!(inner_event, b1); - assert!(recv.try_recv().is_err()); -} - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key, gk = s_key)] -struct A1K(u32, String); - -impl A1K { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } - - pub fn s_key(&self) -> Vec { - self.1.as_bytes().to_vec() - } -} - -#[test] -fn watch_one_secondary_key() { - let tf = 
tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let a = A1K(1, "a".to_string()); - - let (recv, _) = db - .secondary_watch::(A1KKey::s_key, Some(&a.s_key())) - .unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..1 { - let inner_event: A1K = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert_eq!(inner_event, a); - } - assert!(recv.try_recv().is_err()); -} - -#[test] -fn watch_all_secondary_keys() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let a1 = A1K(1, "a".to_string()); - let a2 = A1K(2, "b".to_string()); - - let (recv, _) = db.secondary_watch::(A1KKey::s_key, None).unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a1.clone()).unwrap(); - tables.insert(&tx, a2.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..2 { - let inner_event: A1K = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert!(inner_event == a1 || inner_event == a2); - } - assert!(recv.try_recv().is_err()); -} - -#[test] -fn unwatch() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let a = A(1); - - let (recv, recv_id) = db.primary_watch::(Some(&a.p_key())).unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..1 { - let inner_event: A = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert_eq!(inner_event, a); - } - - db.unwatch(recv_id).unwrap(); - - let 
tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a.clone()).unwrap(); - } - tx.commit().unwrap(); - assert!(recv.try_recv().is_err()); -} - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key)] -struct C(String); - -impl C { - pub fn p_key(&self) -> Vec { - self.0.as_bytes().to_vec() - } -} - -#[test] -fn watch_start_with() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let c1 = C("a_1".to_string()); - let c2 = C("a_2".to_string()); - let c3 = C("b_1".to_string()); - - let (recv, _) = db - .primary_watch_start_with::(&"a".as_bytes().to_vec()) - .unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, c1.clone()).unwrap(); - tables.insert(&tx, c2.clone()).unwrap(); - tables.insert(&tx, c3.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..2 { - let inner_event: C = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert!(inner_event == c1 || inner_event == c2); - } - assert!(recv.try_recv().is_err()); -} - -#[test] -fn watch_start_with_by_key() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let a1 = A1K(1, "a_1".to_string()); - let a2 = A1K(2, "a_2".to_string()); - let a3 = A1K(3, "b_1".to_string()); - - let (recv, _) = db - .secondary_watch_start_with::(A1KKey::s_key, &"a".as_bytes().to_vec()) - .unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a1.clone()).unwrap(); - tables.insert(&tx, a2.clone()).unwrap(); - tables.insert(&tx, a3.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..2 { - let inner_event: A1K = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - 
assert!(inner_event == a1 || inner_event == a2); - } - assert!(recv.try_recv().is_err()); -} - -#[test] -fn watch_all_delete() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let a = A(1); - - let (recv, _) = db.primary_watch::(None).unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a.clone()).unwrap(); - } - tx.commit().unwrap(); - - recv.recv_timeout(TIMEOUT).unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.remove(&tx, a.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..1 { - let r_a: A = if let Event::Delete(event) = recv.recv_timeout(TIMEOUT).unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert_eq!(r_a, a); - } - assert!(recv.try_recv().is_err()); -} - -#[test] -fn watch_all_update() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let a1 = A(1); - let a2 = A(2); - - let (recv, _) = db.primary_watch::(None).unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a1.clone()).unwrap(); - } - tx.commit().unwrap(); - - recv.recv_timeout(TIMEOUT).unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.update(&tx, a1.clone(), a2.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..1 { - let (old_r_a, new_r_a): (A, A) = - if let Event::Update(event) = recv.recv_timeout(TIMEOUT).unwrap() { - (event.inner_old(), event.inner_new()) - } else { - panic!("wrong event") - }; - assert_eq!(old_r_a, a1); - assert_eq!(new_r_a, a2); - } - assert!(recv.try_recv().is_err()); -} diff --git a/tests/11_watch_tokio.rs b/tests/11_watch_tokio.rs deleted file mode 100644 index 50b98e9c..00000000 --- a/tests/11_watch_tokio.rs +++ /dev/null @@ -1,62 +0,0 @@ -#![cfg(not(feature = "native_model"))] -#![cfg(feature = 
"tokio")] -mod tests; - -use serde::{Deserialize, Serialize}; -use std::time::Duration; -use struct_db::watch::Event; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key)] -struct A(u32); - -impl A { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } -} - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key)] -struct B(u32); - -impl B { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } -} - -const TIMEOUT: Duration = Duration::from_secs(1); - -#[tokio::test] -async fn watch_one_primary_key() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - - let a = A(1); - - let (mut recv, _) = db.primary_watch::(Some(&a.p_key())).unwrap(); - - let tx = db.transaction().unwrap(); - { - let mut tables = tx.tables(); - tables.insert(&tx, a.clone()).unwrap(); - } - tx.commit().unwrap(); - - for _ in 0..1 { - let inner_event: A = if let Event::Insert(event) = recv.recv().await.unwrap() { - event.inner() - } else { - panic!("wrong event") - }; - assert_eq!(inner_event, a); - } - assert!(recv.try_recv().is_err()); -} - -// TODO: maybe do others tests but it should the same as a std::sync::mpsc::channel. 
diff --git a/tests/12_migration.rs b/tests/12_migration.rs deleted file mode 100644 index 0747424f..00000000 --- a/tests/12_migration.rs +++ /dev/null @@ -1,106 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use serde::{Deserialize, Serialize}; -use struct_db::*; - -type Item = ItemV1; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key)] -struct ItemV0(u32); - -impl ItemV0 { - pub fn p_key(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } -} - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = p_key)] -struct ItemV1(String); - -impl ItemV1 { - pub fn new(s: &str) -> Self { - Self(s.to_string()) - } - pub fn p_key(&self) -> Vec { - self.0.as_bytes().to_vec() - } -} - -impl From for ItemV1 { - fn from(av1: ItemV0) -> Self { - Self(av1.0.to_string()) - } -} - -#[test] -fn migration() { - let tf = tests::init(); - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - - db.define::(); - db.define::(); - - let a = ItemV0(42); - - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, a.clone()).unwrap(); - } - txn.commit().unwrap(); - - // Check if a is saved - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - let a1 = tables - .primary_get::(&txn, &a.p_key()) - .unwrap() - .unwrap(); - assert_eq!(a, a1); - } - txn.commit().unwrap(); - - #[cfg(not(feature = "tokio"))] - let (recv_av1, _id) = db.primary_watch::(None).unwrap(); - #[cfg(not(feature = "tokio"))] - let (recv_av2, _id) = db.primary_watch::(None).unwrap(); - - #[cfg(feature = "tokio")] - let (mut recv_av1, _id) = db.primary_watch::(None).unwrap(); - #[cfg(feature = "tokio")] - let (mut recv_av2, _id) = db.primary_watch::(None).unwrap(); - - // Migrate - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.migrate::(&txn).unwrap(); - } - txn.commit().unwrap(); - - // Check is there is no event from AV1 - 
assert!(recv_av1.try_recv().is_err()); - // Check is there is no event from AV2 - assert!(recv_av2.try_recv().is_err()); - - // Check migration - let txn = db.read_transaction().unwrap(); - { - let mut tables = txn.tables(); - let len_av1 = tables.len::(&txn).unwrap(); - assert_eq!(len_av1, 0); - let len_av2 = tables.len::(&txn).unwrap(); - assert_eq!(len_av2, 1); - - let a2 = tables - .primary_get::(&txn, "42".as_bytes()) - .unwrap() - .unwrap(); - assert_eq!(a2, Item::new("42")); - } -} diff --git a/tests/13_util.rs b/tests/13_util.rs deleted file mode 100644 index e01bd584..00000000 --- a/tests/13_util.rs +++ /dev/null @@ -1,40 +0,0 @@ -#![cfg(not(feature = "native_model"))] -mod tests; - -use struct_db::*; - -#[test] -fn test_builder() { - let tf = tests::init(); - // Create without error - let mut _db = Builder::new().create(&tf.path("test")).unwrap(); -} - -#[test] -fn test_builder_with_set_cache_size() { - let tf = tests::init(); - // Create without error - let mut _db = Builder::new() - .set_cache_size(100) - .create(&tf.path("test")) - .unwrap(); -} - -#[test] -fn test_open_unexisting_database() { - let tf = tests::init(); - // Open an unexisting database - assert!(Builder::new().open(&tf.path("test")).is_err()); -} - -#[test] -fn test_open_existing_database() { - let tf = tests::init(); - - // Create a database - let db = Builder::new().create(&tf.path("test")).unwrap(); - drop(db); - - // Open an existing database - let _db = Builder::new().open(&tf.path("test")).unwrap(); -} diff --git a/tests/convert_all.rs b/tests/convert_all.rs new file mode 100644 index 00000000..3ab8f2b1 --- /dev/null +++ b/tests/convert_all.rs @@ -0,0 +1,89 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +type Item = ItemV1; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct ItemV0 { + #[primary_key] + pub id: u32, +} + 
+#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 2, version = 1)] +#[native_db] +struct ItemV1 { + #[primary_key] + pub id: String, +} + +impl From for ItemV1 { + fn from(item: ItemV0) -> Self { + ItemV1 { + id: item.id.to_string(), + } + } +} + +#[test] +fn convert_all() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let a = ItemV0 { id: 42 }; + + let rw_txn = db.rw_transaction().unwrap(); + rw_txn.insert(a.clone()).unwrap(); + rw_txn.commit().unwrap(); + + // Check if a is saved + let txn = db.rw_transaction().unwrap(); + let a1 = txn.get().primary(a.id).unwrap().unwrap(); + assert_eq!(a, a1); + txn.commit().unwrap(); + + #[allow(unused_mut)] + #[cfg(not(feature = "tokio"))] + let (mut recv_av1, _id) = db.watch().scan().primary().all::().unwrap(); + #[allow(unused_mut)] + #[cfg(not(feature = "tokio"))] + let (mut recv_av2, _id) = db.watch().scan().primary().all::().unwrap(); + + #[cfg(feature = "tokio")] + let (mut recv_av1, _id) = db.watch().scan().primary().all::().unwrap(); + #[cfg(feature = "tokio")] + let (mut recv_av2, _id) = db.watch().scan().primary().all::().unwrap(); + + // Migrate + let rw_txn = db.rw_transaction().unwrap(); + rw_txn.convert_all::().unwrap(); + rw_txn.commit().unwrap(); + + // Check is there is no event from AV1 + assert!(recv_av1.try_recv().is_err()); + // Check is there is no event from AV2 + assert!(recv_av2.try_recv().is_err()); + + // Check migration + let r_txn = db.r_transaction().unwrap(); + let len_av1 = r_txn.len().primary::().unwrap(); + assert_eq!(len_av1, 0); + let len_av2 = r_txn.len().primary::().unwrap(); + assert_eq!(len_av2, 1); + + let a2: Item = r_txn.get().primary("42").unwrap().unwrap(); + assert_eq!( + a2, + Item { + id: "42".to_string() + } + ); +} diff --git a/tests/macro_def/mod.rs b/tests/macro_def/mod.rs 
index 248136a0..b620a0af 100644 --- a/tests/macro_def/mod.rs +++ b/tests/macro_def/mod.rs @@ -1,2 +1,5 @@ -mod only_primary_key; -mod with_secondary_keys; +mod primary_key; +mod primary_key_attribute; +mod secondary_key; +mod secondary_key_attribute; +mod secondary_key_mix; diff --git a/tests/macro_def/only_primary_key.rs b/tests/macro_def/only_primary_key.rs deleted file mode 100644 index c9442129..00000000 --- a/tests/macro_def/only_primary_key.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![cfg(not(feature = "native_model"))] - -use serde::{Deserialize, Serialize}; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] -#[struct_db(pk = generate_my_primary_key)] -struct Item { - id: u32, - name: String, -} - -impl Item { - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() - } -} - -#[test] -fn test_insert_my_item() { - let item = Item { - id: 1, - name: "test".to_string(), - }; - - let key: Vec = item.struct_db_pk(); - assert_eq!(key, "1-test".as_bytes()); -} diff --git a/tests/macro_def/primary_key.rs b/tests/macro_def/primary_key.rs new file mode 100644 index 00000000..7f5ab214 --- /dev/null +++ b/tests/macro_def/primary_key.rs @@ -0,0 +1,28 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db(primary_key(compute_primary_key))] +struct Item { + id: u32, + name: String, +} + +impl Item { + pub fn compute_primary_key(&self) -> String { + format!("{}-{}", self.id, self.name) + } +} + +#[test] +fn test_insert_my_item() { + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let key = item.native_db_primary_key(); + assert_eq!(key, "1-test".database_inner_key_value()); +} diff --git a/tests/macro_def/primary_key_attribute.rs b/tests/macro_def/primary_key_attribute.rs new file mode 100644 index 00000000..d46342e1 --- /dev/null +++ 
b/tests/macro_def/primary_key_attribute.rs @@ -0,0 +1,24 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, + name: String, +} +// TODO: Test for other type enum tuple etc ... + +#[test] +fn test_insert_my_item() { + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let key = item.native_db_primary_key(); + assert_eq!(key, 1_u32.database_inner_key_value()); +} diff --git a/tests/macro_def/secondary_key.rs b/tests/macro_def/secondary_key.rs new file mode 100644 index 00000000..a6f067f9 --- /dev/null +++ b/tests/macro_def/secondary_key.rs @@ -0,0 +1,156 @@ +use native_db::db_type::{DatabaseKeyDefinition, DatabaseKeyValue, Input}; +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db(primary_key(compute_primary_key), secondary_key(compute_secondary_key))] +struct ItemSecondary { + id: u32, + name: String, +} + +impl ItemSecondary { + pub fn compute_primary_key(&self) -> String { + format!("{}-{}", self.id, self.name) + } + pub fn compute_secondary_key(&self) -> String { + format!("{}-{}", self.name, self.id) + } +} + +#[test] +fn test_secondary() { + let item = ItemSecondary { + id: 1, + name: "test".to_string(), + }; + + let primary_key = item.native_db_primary_key(); + assert_eq!(primary_key, "1-test".database_inner_key_value()); + + let secondary_key: HashMap<_, DatabaseKeyValue> = item.native_db_secondary_keys(); + assert_eq!(secondary_key.len(), 1); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 1, + 1, + "itemsecondary_compute_secondary_key", + Default::default() + )) + .unwrap(), + 
&DatabaseKeyValue::Default("test-1".database_inner_key_value()) + ); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 2, version = 1)] +#[native_db( + primary_key(compute_primary_key), + secondary_key(compute_secondary_key, unique) +)] +struct ItemSecondaryUnique { + id: u32, + name: String, +} + +impl ItemSecondaryUnique { + pub fn compute_primary_key(&self) -> String { + format!("{}-{}", self.id, self.name) + } + pub fn compute_secondary_key(&self) -> String { + format!("{}-{}", self.name, self.id) + } +} + +#[test] +fn test_secondary_unique() { + let item = ItemSecondaryUnique { + id: 1, + name: "test".to_string(), + }; + + let primary_key = item.native_db_primary_key(); + assert_eq!(primary_key, "1-test".database_inner_key_value()); + + let secondary_key: HashMap<_, DatabaseKeyValue> = item.native_db_secondary_keys(); + assert_eq!(secondary_key.len(), 1); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 2, + 1, + "itemsecondaryunique_compute_secondary_key", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Default("test-1".database_inner_key_value()) + ); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 2, version = 1)] +#[native_db( + primary_key(compute_primary_key), + secondary_key(compute_secondary_key, optional) +)] +struct ItemSecondaryOptional { + id: u32, + name: Option, +} + +impl ItemSecondaryOptional { + pub fn compute_primary_key(&self) -> String { + format!("{}", self.id) + } + pub fn compute_secondary_key(&self) -> Option { + if let Some(name) = &self.name { + Some(format!("{}-{}", name, self.id)) + } else { + None + } + } +} + +#[test] +fn test_secondary_optional() { + let item = ItemSecondaryOptional { + id: 1, + name: Some("test".to_string()), + }; + + let primary_key = item.native_db_primary_key(); + assert_eq!(primary_key, "1".database_inner_key_value()); + + let secondary_key: HashMap<_, DatabaseKeyValue> = item.native_db_secondary_keys(); + 
assert_eq!(secondary_key.len(), 1); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 2, + 1, + "itemsecondaryoptional_compute_secondary_key", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Optional(Some("test-1".database_inner_key_value())) + ); + + let item_none = ItemSecondaryOptional { id: 2, name: None }; + let secondary_key: HashMap<_, DatabaseKeyValue> = item_none.native_db_secondary_keys(); + assert_eq!(secondary_key.len(), 1); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 2, + 1, + "itemsecondaryoptional_compute_secondary_key", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Optional(None) + ); +} diff --git a/tests/macro_def/secondary_key_attribute.rs b/tests/macro_def/secondary_key_attribute.rs new file mode 100644 index 00000000..779ec4ae --- /dev/null +++ b/tests/macro_def/secondary_key_attribute.rs @@ -0,0 +1,174 @@ +use native_db::db_type::{DatabaseKeyDefinition, DatabaseKeyValue, Input}; +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct ItemSecondary { + #[primary_key] + id: u32, + #[secondary_key] + name: String, +} + +#[test] +fn test_secondary() { + let item = ItemSecondary { + id: 1, + name: "test".to_string(), + }; + + let primary_key = item.native_db_primary_key(); + assert_eq!(primary_key, 1u32.database_inner_key_value()); + + let secondary_key: HashMap<_, DatabaseKeyValue> = item.native_db_secondary_keys(); + assert_eq!(secondary_key.len(), 1); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 1, + 1, + "itemsecondary_name", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Default("test".database_inner_key_value()) + ); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 2, version = 1)] +#[native_db] +struct 
ItemSecondaryOptional { + #[primary_key] + id: u32, + #[secondary_key(optional)] + name: Option, +} + +#[test] +fn test_secondary_optional() { + let item = ItemSecondaryOptional { + id: 1, + name: Some("test".to_string()), + }; + + let primary_key = item.native_db_primary_key(); + assert_eq!(primary_key, 1u32.database_inner_key_value()); + + let secondary_key: HashMap<_, DatabaseKeyValue> = item.native_db_secondary_keys(); + assert_eq!(secondary_key.len(), 1); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 2, + 1, + "itemsecondaryoptional_name", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Optional(Some("test".database_inner_key_value())) + ); + + let item_none = ItemSecondaryOptional { id: 2, name: None }; + let secondary_key: HashMap<_, DatabaseKeyValue> = item_none.native_db_secondary_keys(); + assert_eq!(secondary_key.len(), 1); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 2, + 1, + "itemsecondaryoptional_name", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Optional(None) + ); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 3, version = 1)] +#[native_db] +struct ItemSecondaryUnique { + #[primary_key] + id: u32, + #[secondary_key(unique)] + name: String, +} + +#[test] +fn test_secondary_unique() { + let item = ItemSecondaryUnique { + id: 1, + name: "test".to_string(), + }; + + let primary_key = item.native_db_primary_key(); + assert_eq!(primary_key, 1u32.database_inner_key_value()); + + let secondary_key: HashMap<_, DatabaseKeyValue> = item.native_db_secondary_keys(); + assert_eq!(secondary_key.len(), 1); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 3, + 1, + "itemsecondaryunique_name", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Default("test".database_inner_key_value()) + ); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 4, version = 1)] +#[native_db] +struct ItemSecondaryOthers { 
+ #[primary_key] + id: u32, + #[secondary_key(unique)] + name: String, + #[secondary_key()] + name2: String, +} + +#[test] +fn test_secondary_others() { + let item = ItemSecondaryOthers { + id: 1, + name: "test".to_string(), + name2: "test2".to_string(), + }; + + let primary_key = item.native_db_primary_key(); + assert_eq!(primary_key, 1u32.database_inner_key_value()); + + let secondary_key: HashMap<_, DatabaseKeyValue> = item.native_db_secondary_keys(); + assert_eq!(secondary_key.len(), 2); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 4, + 1, + "itemsecondaryothers_name", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Default("test".database_inner_key_value()) + ); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 4, + 1, + "itemsecondaryothers_name2", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Default("test2".database_inner_key_value()) + ); +} diff --git a/tests/macro_def/secondary_key_mix.rs b/tests/macro_def/secondary_key_mix.rs new file mode 100644 index 00000000..86e1f950 --- /dev/null +++ b/tests/macro_def/secondary_key_mix.rs @@ -0,0 +1,61 @@ +use native_db::db_type::Input; +use native_db::db_type::{DatabaseKeyDefinition, DatabaseKeyValue}; +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db(primary_key(compute_primary_key), secondary_key(compute_secondary_key))] +struct ItemSecondaryMix { + id: u32, + #[secondary_key(unique)] + name: String, +} + +impl ItemSecondaryMix { + pub fn compute_primary_key(&self) -> String { + format!("{}-{}", self.id, self.name) + } + pub fn compute_secondary_key(&self) -> String { + format!("{}-{}", self.name, self.id) + } +} + +#[test] +fn test_secondary() { + let item = ItemSecondaryMix { + id: 1, + name: "test".to_string(), + }; + + let primary_key = 
item.native_db_primary_key(); + assert_eq!(primary_key, "1-test".database_inner_key_value()); + + let secondary_key: HashMap<_, DatabaseKeyValue> = item.native_db_secondary_keys(); + assert_eq!(secondary_key.len(), 2); + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 1, + 1, + "itemsecondarymix_compute_secondary_key", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Default("test-1".database_inner_key_value()) + ); + + assert_eq!( + secondary_key + .get(&DatabaseKeyDefinition::new( + 1, + 1, + "itemsecondarymix_name", + Default::default() + )) + .unwrap(), + &DatabaseKeyValue::Default("test".database_inner_key_value()) + ); +} diff --git a/tests/macro_def/with_secondary_keys.rs b/tests/macro_def/with_secondary_keys.rs deleted file mode 100644 index ebeeb9b9..00000000 --- a/tests/macro_def/with_secondary_keys.rs +++ /dev/null @@ -1,43 +0,0 @@ -#![cfg(not(feature = "native_model"))] - -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use struct_db::SDBItem; -use struct_db::*; - -#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] -#[struct_db( - pk = generate_my_primary_key, - gk = generate_my_secondary_key -)] -struct Item { - id: u32, - name: String, -} - -impl Item { - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() - } - pub fn generate_my_secondary_key(&self) -> Vec { - format!("{}-{}", self.name, self.id).into() - } -} - -#[test] -fn test_insert_my_item() { - let item = Item { - id: 1, - name: "test".to_string(), - }; - - let primary_key: Vec = item.struct_db_pk(); - assert_eq!(primary_key, "1-test".as_bytes()); - - let secondary_key: HashMap<_, Vec> = item.struct_db_gks(); - assert_eq!(secondary_key.len(), 1); - assert_eq!( - secondary_key.get("item_generate_my_secondary_key").unwrap(), - "test-1".as_bytes() - ); -} diff --git a/tests/migrate/only_primary_key.rs b/tests/migrate/only_primary_key.rs index 5b616e81..36695d48 100644 --- 
a/tests/migrate/only_primary_key.rs +++ b/tests/migrate/only_primary_key.rs @@ -1,12 +1,11 @@ -use native_model::native_model; +use native_db::*; +use native_model::{native_model, Model}; use serde::{Deserialize, Serialize}; use shortcut_assert_fs::TmpFs; -use struct_db::ReadableTable; -use struct_db::{struct_db, Db}; #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] #[native_model(id = 1, version = 1)] -#[struct_db(pk = generate_my_primary_key)] +#[native_db(primary_key(generate_my_primary_key))] struct ItemV1 { id: u32, name: String, @@ -14,14 +13,14 @@ struct ItemV1 { impl ItemV1 { #[allow(dead_code)] - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() + pub fn generate_my_primary_key(&self) -> String { + format!("{}-{}", self.id, self.name) } } #[derive(Serialize, Deserialize, Eq, PartialEq, Debug)] #[native_model(id = 1, version = 2, from = ItemV1)] -#[struct_db(pk = generate_my_primary_key)] +#[native_db(primary_key(generate_my_primary_key))] struct ItemV2 { id: u64, name: String, @@ -47,29 +46,30 @@ impl From for ItemV1 { impl ItemV2 { #[allow(dead_code)] - pub fn generate_my_primary_key(&self) -> Vec { - format!("{}-{}", self.id, self.name).into() + pub fn generate_my_primary_key(&self) -> String { + format!("{}-{}", self.id, self.name) } } #[test] fn test_migrate() { let tf = TmpFs::new().unwrap(); - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::().unwrap(); + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); let item = ItemV1 { id: 1, name: "test".to_string(), }; - let txn = db.transaction().unwrap(); - txn.tables().insert(&txn, item).unwrap(); - txn.commit().unwrap(); + let rw_txn = db.rw_transaction().unwrap(); + rw_txn.insert(item).unwrap(); + rw_txn.commit().unwrap(); - let txn = db.read_transaction().unwrap(); + let r_txn = db.r_transaction().unwrap(); - let item: ItemV1 = 
txn.tables().primary_get(&txn, b"1-test").unwrap().unwrap(); + let item: ItemV1 = r_txn.get().primary("1-test").unwrap().unwrap(); assert_eq!( item, ItemV1 { @@ -77,17 +77,20 @@ fn test_migrate() { name: "test".to_string(), } ); - drop(txn); + drop(r_txn); drop(db); - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::().unwrap(); - db.define::().unwrap(); + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); - db.migrate::().unwrap(); + let rw = db.rw_transaction().unwrap(); + rw.migrate::().unwrap(); + rw.commit().unwrap(); - let txn = db.read_transaction().unwrap(); - let item: ItemV2 = txn.tables().primary_get(&txn, b"1-test").unwrap().unwrap(); + let r_txn = db.r_transaction().unwrap(); + let item: ItemV2 = r_txn.get().primary("1-test").unwrap().unwrap(); assert_eq!( item, ItemV2 { @@ -96,10 +99,11 @@ fn test_migrate() { } ); - let redb_stats = db.redb_stats().unwrap(); - assert_eq!(redb_stats.stats_tables.len(), 2); - assert_eq!(redb_stats.stats_tables[0].name, "itemv1"); - assert_eq!(redb_stats.stats_tables[0].num_raw, 0); - assert_eq!(redb_stats.stats_tables[1].name, "itemv2"); - assert_eq!(redb_stats.stats_tables[1].num_raw, 1); + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 2); + assert_eq!(stats.primary_tables[0].name, "1_1_generate_my_primary_key"); + assert_eq!(stats.primary_tables[0].n_entries, Some(0)); + assert_eq!(stats.primary_tables[1].name, "1_2_generate_my_primary_key"); + assert_eq!(stats.primary_tables[1].n_entries, Some(1)); + assert_eq!(stats.secondary_tables.len(), 0); } diff --git a/tests/migrate/with_secondary_keys.rs b/tests/migrate/with_secondary_keys.rs index 0231d834..980c4553 100644 --- a/tests/migrate/with_secondary_keys.rs +++ b/tests/migrate/with_secondary_keys.rs @@ -1,25 +1,27 @@ -use native_model::native_model; +use native_db::*; +use 
native_model::{native_model, Model}; use serde::{Deserialize, Serialize}; use shortcut_assert_fs::TmpFs; -use struct_db::{struct_db, Db, ReadableTable}; +use std::convert::TryFrom; +use std::convert::TryInto; #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] #[native_model(id = 1, version = 1)] -#[struct_db(pk = id_key, gk = name_key)] +#[native_db(primary_key(id_key), secondary_key(name_key))] struct ItemV1 { id: u32, name: String, } impl ItemV1 { - pub fn id_key(&self) -> Vec { - self.id.to_be_bytes().to_vec() + pub fn id_key(&self) -> u32 { + self.id } - pub fn name_key(&self) -> Vec { - let mut tag = self.name.as_bytes().to_vec(); - let primary_key = self.id_key(); - tag.extend_from_slice(&primary_key); + pub fn name_key(&self) -> String { + let mut tag = self.name.clone(); + let primary_key = self.id_key().to_string(); + tag.push_str(&primary_key); tag } pub fn inc(&mut self, new_name: &str) -> &Self { @@ -31,10 +33,10 @@ impl ItemV1 { #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] #[native_model(id = 1, version = 2, from = ItemV1)] -#[struct_db( - pk = id_key, - gk = first_name_key, - gk = last_name_key +#[native_db( + primary_key(id_key), + secondary_key(first_name_key, unique), + secondary_key(last_name_key, unique) )] struct ItemV2 { id: u64, @@ -43,21 +45,21 @@ struct ItemV2 { } impl ItemV2 { - pub fn id_key(&self) -> Vec { - self.id.to_be_bytes().to_vec() + pub fn id_key(&self) -> u64 { + self.id } - pub fn first_name_key(&self) -> Vec { - let mut tag = self.first_name.as_bytes().to_vec(); - let primary_key = self.id_key(); - tag.extend_from_slice(&primary_key); + pub fn first_name_key(&self) -> String { + let mut tag = self.first_name.clone(); + let primary_key = self.id_key().to_string(); + tag.push_str(&primary_key); tag } - pub fn last_name_key(&self) -> Vec { - let mut tag = self.last_name.as_bytes().to_vec(); - let primary_key = self.id_key(); - tag.extend_from_slice(&primary_key); + pub fn last_name_key(&self) -> 
String { + let mut tag = self.last_name.clone(); + let primary_key = self.id_key().to_string(); + tag.push_str(&primary_key); tag } } @@ -87,66 +89,63 @@ impl From for ItemV1 { #[test] fn test_migrate() { let tf = TmpFs::new().unwrap(); - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::().unwrap(); + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); let mut item = ItemV1 { id: 1, name: "test".to_string(), }; - let txn = db.transaction().unwrap(); + let rw_txn = db.rw_transaction().unwrap(); { - let mut tables = txn.tables(); - tables.insert(&txn, item.clone()).unwrap(); - tables - .insert(&txn, item.inc("Victor Hugo").clone()) - .unwrap(); - tables - .insert(&txn, item.inc("Jules Verne").clone()) - .unwrap(); - tables - .insert(&txn, item.inc("Alexandre Dumas").clone()) - .unwrap(); - tables.insert(&txn, item.inc("Emile Zola").clone()).unwrap(); + rw_txn.insert(item.clone()).unwrap(); + rw_txn.insert(item.inc("Victor Hugo").clone()).unwrap(); + rw_txn.insert(item.inc("Jules Verne").clone()).unwrap(); + rw_txn.insert(item.inc("Alexandre Dumas").clone()).unwrap(); + rw_txn.insert(item.inc("Emile Zola").clone()).unwrap(); } - txn.commit().unwrap(); + rw_txn.commit().unwrap(); let stats = db.redb_stats().unwrap(); - assert_eq!(stats.stats_tables.len(), 2); - assert_eq!(stats.stats_tables[0].name, "itemv1"); - assert_eq!(stats.stats_tables[0].num_raw, 5); - assert_eq!(stats.stats_tables[1].name, "itemv1_name_key"); - assert_eq!(stats.stats_tables[1].num_raw, 5); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_id_key"); + assert_eq!(stats.primary_tables[0].n_entries, Some(5)); + assert_eq!(stats.secondary_tables.len(), 1); + assert_eq!(stats.secondary_tables[0].name, "1_1_name_key"); + assert_eq!(stats.secondary_tables[0].n_entries, Some(5)); + drop(db); - let mut db = 
Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::().unwrap(); - db.define::().unwrap(); + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); - db.migrate::().unwrap(); + let rw = db.rw_transaction().unwrap(); + rw.migrate::().unwrap(); + rw.commit().unwrap(); let stats = db.redb_stats().unwrap(); - assert_eq!(stats.stats_tables.len(), 5); - assert_eq!(stats.stats_tables[0].name, "itemv1"); - assert_eq!(stats.stats_tables[0].num_raw, 0); - assert_eq!(stats.stats_tables[1].name, "itemv1_name_key"); - assert_eq!(stats.stats_tables[1].num_raw, 0); - assert_eq!(stats.stats_tables[2].name, "itemv2"); - assert_eq!(stats.stats_tables[2].num_raw, 5); - assert_eq!(stats.stats_tables[3].name, "itemv2_first_name_key"); - assert_eq!(stats.stats_tables[3].num_raw, 5); - assert_eq!(stats.stats_tables[4].name, "itemv2_last_name_key"); - assert_eq!(stats.stats_tables[4].num_raw, 5); - - let txn = db.read_transaction().unwrap(); + assert_eq!(stats.primary_tables.len(), 2); + assert_eq!(stats.primary_tables[1].name, "1_2_id_key"); + assert_eq!(stats.primary_tables[1].n_entries, Some(5)); + assert_eq!(stats.primary_tables[0].name, "1_1_id_key"); + assert_eq!(stats.primary_tables[0].n_entries, Some(0)); + dbg!(&stats.secondary_tables); + assert_eq!(stats.secondary_tables.len(), 3); + assert_eq!(stats.secondary_tables[0].name, "1_1_name_key"); + assert_eq!(stats.secondary_tables[0].n_entries, Some(0)); + assert_eq!(stats.secondary_tables[1].name, "1_2_first_name_key"); + assert_eq!(stats.secondary_tables[1].n_entries, Some(5)); + assert_eq!(stats.secondary_tables[2].name, "1_2_last_name_key"); + assert_eq!(stats.secondary_tables[2].n_entries, Some(5)); + + let r_txn = db.r_transaction().unwrap(); // Get Victor Hugo by id - let item: ItemV2 = txn - .tables() - .primary_get(&txn, 2_u64.to_be_bytes().as_slice()) - .unwrap() - .unwrap(); + let item: ItemV2 
= r_txn.get().primary(2_u64).unwrap().unwrap(); assert_eq!( item, ItemV2 { @@ -157,10 +156,11 @@ fn test_migrate() { ); // Get Alexandre Dumas by first name - let item: Vec = txn - .tables() - .secondary_iter_start_with(&txn, ItemV2Key::first_name_key, b"Alexandre") + let item: Vec = r_txn + .scan() + .secondary(ItemV2Key::first_name_key) .unwrap() + .start_with("Alexandre") .collect(); assert_eq!( item, @@ -172,10 +172,11 @@ fn test_migrate() { ); // Get Julien Verne by last name - let item: Vec = txn - .tables() - .secondary_iter_start_with(&txn, ItemV2Key::last_name_key, b"Verne") + let item: Vec = r_txn + .scan() + .secondary(ItemV2Key::last_name_key) .unwrap() + .start_with("Verne") .collect(); assert_eq!( item, @@ -186,3 +187,95 @@ fn test_migrate() { }] ); } + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 1, version = 3, try_from = (ItemV2, db_type::Error))] +#[native_db] +struct ItemV3 { + #[primary_key] + id: u64, + first_name: String, + #[secondary_key] + last_name: String, +} + +impl TryFrom for ItemV2 { + type Error = db_type::Error; + fn try_from(item: ItemV3) -> Result { + Ok(ItemV2 { + id: item.id, + first_name: item.first_name, + last_name: item.last_name, + }) + } +} + +impl TryFrom for ItemV3 { + type Error = db_type::Error; + fn try_from(item: ItemV2) -> Result { + Ok(ItemV3 { + id: item.id, + first_name: item.first_name, + last_name: item.last_name, + }) + } +} + +#[test] +fn test_migrate_v3() { + let tf = TmpFs::new().unwrap(); + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let mut item = ItemV1 { + id: 1, + name: "test".to_string(), + }; + + let rw_txn = db.rw_transaction().unwrap(); + { + rw_txn.insert(item.clone()).unwrap(); + rw_txn.insert(item.inc("Victor Hugo").clone()).unwrap(); + rw_txn.insert(item.inc("Jules Verne").clone()).unwrap(); + rw_txn.insert(item.inc("Alexandre Dumas").clone()).unwrap(); + 
rw_txn.insert(item.inc("Emile Zola").clone()).unwrap(); + } + rw_txn.commit().unwrap(); + + drop(db); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + builder.define::().unwrap(); + builder.define::().unwrap(); + let db = builder.open(tf.path("test").as_std_path()).unwrap(); + + // Return error because the latest version is Item is ItemV3 + let rw = db.rw_transaction().unwrap(); + let error = rw.migrate::().unwrap_err(); + assert!(matches!(error, db_type::Error::MigrateLegacyModel(_))); + rw.commit().unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.migrate::().unwrap(); + rw.commit().unwrap(); + + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 3); + assert_eq!(stats.primary_tables[0].name, "1_1_id_key"); + assert_eq!(stats.primary_tables[0].n_entries, Some(0)); + assert_eq!(stats.primary_tables[1].name, "1_2_id_key"); + assert_eq!(stats.primary_tables[1].n_entries, None); + assert_eq!(stats.primary_tables[2].name, "1_3_id"); + assert_eq!(stats.primary_tables[2].n_entries, Some(5)); + assert_eq!(stats.secondary_tables.len(), 4); + assert_eq!(stats.secondary_tables[0].name, "1_1_name_key"); + assert_eq!(stats.secondary_tables[0].n_entries, Some(0)); + assert_eq!(stats.secondary_tables[1].name, "1_2_first_name_key"); + assert_eq!(stats.secondary_tables[1].n_entries, None); + assert_eq!(stats.secondary_tables[2].name, "1_2_last_name_key"); + assert_eq!(stats.secondary_tables[2].n_entries, None); + assert_eq!(stats.secondary_tables[3].name, "1_3_last_name"); + assert_eq!(stats.secondary_tables[3].n_entries, Some(5)); +} diff --git a/tests/modules.rs b/tests/modules.rs index 2c0b8a4f..7df0e49c 100644 --- a/tests/modules.rs +++ b/tests/modules.rs @@ -1,4 +1,5 @@ -mod macro_def; -#[cfg(feature = "native_model")] +// mod macro_def; mod migrate; mod primary_drain; +mod query; +mod watch; diff --git a/tests/14_native_model.rs b/tests/native_model.rs similarity index 84% rename from tests/14_native_model.rs 
rename to tests/native_model.rs index 759e65cb..8f7fb942 100644 --- a/tests/14_native_model.rs +++ b/tests/native_model.rs @@ -1,14 +1,9 @@ -#![cfg(feature = "native_model")] - -mod tests; - -use serde::{Deserialize, Serialize}; -use struct_db::*; -use struct_db_macro::struct_db; - use bincode; use bincode::{config, Decode, Encode}; -use native_model::native_model; +use native_db::*; +use native_db_macro::native_db; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; pub struct Bincode; impl native_model::Encode for Bincode { @@ -27,7 +22,7 @@ impl native_model::Decode for Bincode { #[derive(Serialize, Deserialize, Encode, Decode, Eq, PartialEq, Debug)] #[native_model(id = 1, version = 1, with = Bincode)] -#[struct_db(pk = generate_my_primary_key)] +#[native_db(primary_key(compute_primary_key))] struct ItemV1 { id: u32, name: String, @@ -35,7 +30,7 @@ struct ItemV1 { impl ItemV1 { #[allow(dead_code)] - pub fn generate_my_primary_key(&self) -> Vec { + pub fn compute_primary_key(&self) -> Vec { format!("{}-{}", self.id, self.name).into() } } diff --git a/tests/primary_drain/only_primary_key.rs b/tests/primary_drain/only_primary_key.rs index 70ca3d11..14fabde1 100644 --- a/tests/primary_drain/only_primary_key.rs +++ b/tests/primary_drain/only_primary_key.rs @@ -1,11 +1,11 @@ -#![cfg(not(feature = "native_model"))] - +use native_db::*; +use native_model::{native_model, Model}; use serde::{Deserialize, Serialize}; use shortcut_assert_fs::TmpFs; -use struct_db::*; #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db(pk = generate_my_primary_key)] +#[native_model(id = 1, version = 1)] +#[native_db(primary_key(generate_my_primary_key))] struct Item { id: u32, name: String, @@ -31,85 +31,32 @@ fn drain_all() { name: "test".to_string(), }; - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::().unwrap(); + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = 
builder.create(tf.path("test").as_std_path()).unwrap(); // Insert 5 items - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item.clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - } - txn.commit().unwrap(); + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.insert(item.inc().clone()).unwrap(); + rw.insert(item.inc().clone()).unwrap(); + rw.insert(item.inc().clone()).unwrap(); + rw.insert(item.inc().clone()).unwrap(); + rw.commit().unwrap(); // Count items - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); + let r = db.r_transaction().unwrap(); + let len = r.len().primary::().unwrap(); assert_eq!(len, 5); // Drain items - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - let items = tables.primary_drain::(&txn, ..).unwrap(); - assert_eq!(items.len(), 5); - } - txn.commit().unwrap(); + let rw = db.rw_transaction().unwrap(); + let items = rw.drain().primary::().unwrap(); + assert_eq!(items.len(), 5); + rw.commit().unwrap(); // Count items - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); + let r = db.r_transaction().unwrap(); + let len = r.len().primary::().unwrap(); assert_eq!(len, 0); } - -#[test] -fn drain_a_part() { - let tf = TmpFs::new().unwrap(); - - let mut item = Item { - id: 1, - name: "test".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::().unwrap(); - - // Insert 5 items - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item.clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, 
item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - } - txn.commit().unwrap(); - - // Count items - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); - assert_eq!(len, 5); - - // Drain items - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - let items = tables - .primary_drain::(&txn, ..3_i32.to_le_bytes().as_slice()) - .unwrap(); - assert_eq!(items.len(), 2); - assert_eq!(items[0].id, 1); - assert_eq!(items[1].id, 2); - } - txn.commit().unwrap(); - - // Count items - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); - assert_eq!(len, 3); -} diff --git a/tests/primary_drain/with_secondary_keys.rs b/tests/primary_drain/with_secondary_keys.rs index b2216f87..ed060572 100644 --- a/tests/primary_drain/with_secondary_keys.rs +++ b/tests/primary_drain/with_secondary_keys.rs @@ -1,14 +1,13 @@ -#![cfg(not(feature = "native_model"))] - -use redb::TableHandle; +use native_db::*; +use native_model::{native_model, Model}; use serde::{Deserialize, Serialize}; use shortcut_assert_fs::TmpFs; -use struct_db::*; #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] -#[struct_db( - pk = generate_my_primary_key, - gk = generate_my_secondary_key +#[native_model(id = 1, version = 1)] +#[native_db( + primary_key(generate_my_primary_key), + secondary_key(generate_my_secondary_key, unique) )] struct Item { id: u32, @@ -17,16 +16,17 @@ struct Item { } impl Item { - pub fn generate_my_primary_key(&self) -> Vec { - self.id.to_be_bytes().to_vec() + pub fn generate_my_primary_key(&self) -> u32 { + self.id } - pub fn generate_my_secondary_key(&self) -> Vec { - let mut tag = self.tag.clone().into_bytes(); - let primary_key = self.generate_my_primary_key(); - tag.extend_from_slice(&primary_key); + pub fn generate_my_secondary_key(&self) -> String { + let mut tag = self.tag.clone(); + let primary_key = 
self.generate_my_primary_key().to_string(); + tag.push_str(&primary_key); tag } + pub fn inc(&mut self) -> &Self { self.id += 1; self @@ -43,114 +43,54 @@ fn drain_all() { tag: "red".to_string(), }; - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::().unwrap(); + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); // Insert 5 items - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item.clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - } - txn.commit().unwrap(); + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.insert(item.inc().clone()).unwrap(); + rw.insert(item.inc().clone()).unwrap(); + rw.insert(item.inc().clone()).unwrap(); + rw.insert(item.inc().clone()).unwrap(); + rw.commit().unwrap(); let stats = db.redb_stats().unwrap(); - assert_eq!(stats.stats_tables.len(), 2); - assert_eq!(stats.stats_tables[0].name, "item"); - assert_eq!(stats.stats_tables[0].num_raw, 5); - assert_eq!(stats.stats_tables[1].name, "item_generate_my_secondary_key"); - assert_eq!(stats.stats_tables[1].num_raw, 5); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_generate_my_primary_key"); + assert_eq!(stats.primary_tables[0].n_entries, Some(5)); + assert_eq!(stats.secondary_tables.len(), 1); + assert_eq!( + stats.secondary_tables[0].name, + "1_1_generate_my_secondary_key" + ); + assert_eq!(stats.secondary_tables[0].n_entries, Some(5)); // Count items - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); + let r = db.r_transaction().unwrap(); + let len = r.len().primary::().unwrap(); assert_eq!(len, 5); // Drain items - let 
txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - let items = tables.primary_drain::(&txn, ..).unwrap(); - assert_eq!(items.len(), 5); - } - txn.commit().unwrap(); + let rw = db.rw_transaction().unwrap(); + let items = rw.drain().primary::().unwrap(); + assert_eq!(items.len(), 5); + rw.commit().unwrap(); // Count items - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); + let r = db.r_transaction().unwrap(); + let len = r.len().primary::().unwrap(); assert_eq!(len, 0); let stats = db.redb_stats().unwrap(); - assert_eq!(stats.stats_tables.len(), 2); - assert_eq!(stats.stats_tables[0].name, "item"); - assert_eq!(stats.stats_tables[0].num_raw, 0); - assert_eq!(stats.stats_tables[1].name, "item_generate_my_secondary_key"); - assert_eq!(stats.stats_tables[1].num_raw, 0); -} - -#[test] -fn drain_a_part() { - let tf = TmpFs::new().unwrap(); - - let mut item = Item { - id: 1, - name: "test".to_string(), - tag: "red".to_string(), - }; - - let mut db = Db::create(tf.path("test").as_std_path()).unwrap(); - db.define::().unwrap(); - - // Insert 5 items - let txn = db.transaction().unwrap(); - { - let mut tables = txn.tables(); - tables.insert(&txn, item.clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - tables.insert(&txn, item.inc().clone()).unwrap(); - } - txn.commit().unwrap(); - - let stats = db.redb_stats().unwrap(); - assert_eq!(stats.stats_tables.len(), 2); - assert_eq!(stats.stats_tables[0].name, "item"); - assert_eq!(stats.stats_tables[0].num_raw, 5); - assert_eq!(stats.stats_tables[1].name, "item_generate_my_secondary_key"); - assert_eq!(stats.stats_tables[1].num_raw, 5); - - // Count items - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); - assert_eq!(len, 5); - - // Drain items - let txn = 
db.transaction().unwrap(); - { - let mut tables = txn.tables(); - let items = tables - .primary_drain::(&txn, ..3_i32.to_be_bytes().as_slice()) - .unwrap(); - assert_eq!(items.len(), 2); - assert_eq!(items[0].id, 1); - assert_eq!(items[1].id, 2); - } - txn.commit().unwrap(); - - // Count items - let txn_read = db.read_transaction().unwrap(); - let len = txn_read.tables().len::(&txn_read).unwrap(); - assert_eq!(len, 3); - - let stats = db.redb_stats().unwrap(); - assert_eq!(stats.stats_tables.len(), 2); - assert_eq!(stats.stats_tables[0].name, "item"); - assert_eq!(stats.stats_tables[0].num_raw, 3); - assert_eq!(stats.stats_tables[1].name, "item_generate_my_secondary_key"); - assert_eq!(stats.stats_tables[1].num_raw, 3); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_generate_my_primary_key"); + assert_eq!(stats.primary_tables[0].n_entries, Some(0)); + assert_eq!(stats.secondary_tables.len(), 1); + assert_eq!( + stats.secondary_tables[0].name, + "1_1_generate_my_secondary_key" + ); + assert_eq!(stats.secondary_tables[0].n_entries, Some(0)); } diff --git a/tests/query/insert_get_pk.rs b/tests/query/insert_get_pk.rs new file mode 100644 index 00000000..34a1106c --- /dev/null +++ b/tests/query/insert_get_pk.rs @@ -0,0 +1,60 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, + name: String, +} + +#[test] +fn insert_get() { + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let tf = TmpFs::new().unwrap(); + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.commit().unwrap(); + + let r = 
db.r_transaction().unwrap(); + let result_item = r.get().primary(1u32).unwrap().unwrap(); + assert_eq!(item, result_item); +} + +// TODO: insert should fail if the primary key already exists +// add a patch method to tables to allow for this +#[ignore] +#[test] +fn test_insert_duplicate_key() { + let tf = TmpFs::new().unwrap(); + + let item_1 = Item { + id: 1, + name: "test".to_string(), + }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_1.clone()).unwrap(); + let result = rw.insert(item_1.clone()); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + db_type::Error::DuplicateKey { .. } + )); +} diff --git a/tests/query/insert_get_sk.rs b/tests/query/insert_get_sk.rs new file mode 100644 index 00000000..f929bdd6 --- /dev/null +++ b/tests/query/insert_get_sk.rs @@ -0,0 +1,155 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db(primary_key(pk), secondary_key(gk_1, unique))] +struct Item { + id: u32, + name: String, +} + +impl Item { + pub fn pk(&self) -> String { + format!("{}", self.id) + } + + pub fn gk_1(&self) -> String { + format!("{}-{}", self.name, self.id) + } +} + +#[test] +fn insert_get_read_write_transaction() { + let tf = TmpFs::new().unwrap(); + + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.commit().unwrap(); + + let rw = db.rw_transaction().unwrap(); + let result_item = rw + .get() + .secondary(ItemKey::gk_1, "test-1") + .unwrap() + .unwrap(); 
+ assert_eq!(item, result_item); + rw.commit().unwrap(); +} + +#[test] +fn insert_get_read_transaction() { + let tf = TmpFs::new().unwrap(); + + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let result_item = r.get().secondary(ItemKey::gk_1, "test-1").unwrap().unwrap(); + + assert_eq!(item, result_item); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct ItemDuplicate { + #[primary_key] + id: u32, + #[secondary_key(unique)] + name: String, +} + +#[test] +fn test_insert_duplicate_key() { + let tf = TmpFs::new().unwrap(); + + let item_1 = ItemDuplicate { + id: 1, + name: "test".to_string(), + }; + let item_2 = ItemDuplicate { + id: 2, + name: "test".to_string(), + }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_1).unwrap(); + let result = rw.insert(item_2); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + db_type::Error::DuplicateKey { .. 
} + )); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct ItemOptional { + #[primary_key] + id: u32, + #[secondary_key(unique, optional)] + name: Option, +} + +#[test] +fn test_insert_optional() { + let tf = TmpFs::new().unwrap(); + + let item_1 = ItemOptional { + id: 1, + name: Some("test".to_string()), + }; + let item_2 = ItemOptional { id: 2, name: None }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_1.clone()).unwrap(); + rw.insert(item_2.clone()).unwrap(); + rw.commit().unwrap(); + + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_id"); + assert_eq!(stats.primary_tables[0].n_entries, Some(2)); + assert_eq!(stats.secondary_tables.len(), 1); + assert_eq!(stats.secondary_tables[0].name, "1_1_name"); + assert_eq!(stats.secondary_tables[0].n_entries, Some(1)); + + let r = db.r_transaction().unwrap(); + let result_item = r + .get() + .secondary(ItemOptionalKey::name, "test") + .unwrap() + .unwrap(); + assert_eq!(item_1, result_item); +} diff --git a/tests/query/insert_len_pk.rs b/tests/query/insert_len_pk.rs new file mode 100644 index 00000000..37157c79 --- /dev/null +++ b/tests/query/insert_len_pk.rs @@ -0,0 +1,48 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, + name: String, +} + +#[test] +fn insert_len_read_transaction() { + let tf = TmpFs::new().unwrap(); + + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = 
builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let result_item = r.len().primary::().unwrap(); + assert_eq!(1, result_item); + + let item = Item { + id: 2, + name: "test".to_string(), + }; + + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let result_item = r.len().primary::().unwrap(); + assert_eq!(2, result_item); +} diff --git a/tests/query/insert_remove_pk.rs b/tests/query/insert_remove_pk.rs new file mode 100644 index 00000000..a5d0bf95 --- /dev/null +++ b/tests/query/insert_remove_pk.rs @@ -0,0 +1,45 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, + name: String, +} + +#[test] +fn insert_get() { + let tf = TmpFs::new().unwrap(); + + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.commit().unwrap(); + + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_id"); + assert_eq!(stats.primary_tables[0].n_entries, Some(1)); + + let rw = db.rw_transaction().unwrap(); + rw.remove(item.clone()).unwrap(); + rw.commit().unwrap(); + + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_id"); + assert_eq!(stats.primary_tables[0].n_entries, Some(0)); +} diff --git a/tests/query/insert_remove_sk.rs 
b/tests/query/insert_remove_sk.rs new file mode 100644 index 00000000..5c245fbc --- /dev/null +++ b/tests/query/insert_remove_sk.rs @@ -0,0 +1,113 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, + #[secondary_key] + name: String, +} + +#[test] +fn insert_remove() { + let tf = TmpFs::new().unwrap(); + + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.commit().unwrap(); + + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_id"); + assert_eq!(stats.primary_tables[0].n_entries, Some(1)); + assert_eq!(stats.secondary_tables.len(), 1); + assert_eq!(stats.secondary_tables[0].name, "1_1_name"); + assert_eq!(stats.secondary_tables[0].n_entries, Some(1)); + + let rw = db.rw_transaction().unwrap(); + rw.remove(item.clone()).unwrap(); + rw.commit().unwrap(); + + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_id"); + assert_eq!(stats.primary_tables[0].n_entries, Some(0)); + assert_eq!(stats.secondary_tables.len(), 1); + assert_eq!(stats.secondary_tables[0].name, "1_1_name"); + assert_eq!(stats.secondary_tables[0].n_entries, Some(0)); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct ItemOptional { + #[primary_key] + id: u32, + #[secondary_key(unique, optional)] + name: Option, +} +#[test] +fn insert_remove_unique_optional() { + let tf = TmpFs::new().unwrap(); + 
+ let item_1 = ItemOptional { + id: 1, + name: Some("test".to_string()), + }; + let item_2 = ItemOptional { id: 2, name: None }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_1.clone()).unwrap(); + rw.insert(item_2.clone()).unwrap(); + rw.commit().unwrap(); + + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_id"); + assert_eq!(stats.primary_tables[0].n_entries, Some(2)); + assert_eq!(stats.secondary_tables.len(), 1); + assert_eq!(stats.secondary_tables[0].name, "1_1_name"); + assert_eq!(stats.secondary_tables[0].n_entries, Some(1)); + + let rw = db.rw_transaction().unwrap(); + rw.remove(item_1.clone()).unwrap(); + rw.commit().unwrap(); + + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_id"); + assert_eq!(stats.primary_tables[0].n_entries, Some(1)); + assert_eq!(stats.secondary_tables.len(), 1); + assert_eq!(stats.secondary_tables[0].name, "1_1_name"); + assert_eq!(stats.secondary_tables[0].n_entries, Some(0)); + + let rw = db.rw_transaction().unwrap(); + rw.remove(item_2.clone()).unwrap(); + rw.commit().unwrap(); + + let stats = db.redb_stats().unwrap(); + assert_eq!(stats.primary_tables.len(), 1); + assert_eq!(stats.primary_tables[0].name, "1_1_id"); + assert_eq!(stats.primary_tables[0].n_entries, Some(0)); + assert_eq!(stats.secondary_tables.len(), 1); + assert_eq!(stats.secondary_tables[0].name, "1_1_name"); + assert_eq!(stats.secondary_tables[0].n_entries, Some(0)); +} diff --git a/tests/query/insert_update_pk.rs b/tests/query/insert_update_pk.rs new file mode 100644 index 00000000..1816793a --- /dev/null +++ b/tests/query/insert_update_pk.rs @@ -0,0 +1,62 @@ +use native_db::*; +use native_model::{native_model, Model}; +use 
serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, + name: String, +} + +#[test] +fn insert_update_pk() { + let tf = TmpFs::new().unwrap(); + + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + // Insert the item + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.commit().unwrap(); + + // Check if the item is in the database + let txn = db.r_transaction().unwrap(); + let item2: Item = txn.get().primary(1u32).unwrap().unwrap(); + assert_eq!(item, item2); + + let item2 = Item { + id: 2, + name: "test2".to_string(), + }; + + // Update the item + let rw = db.rw_transaction().unwrap(); + rw.update(item.clone(), item2.clone()).unwrap(); + rw.commit().unwrap(); + + // Check if the item v1 is not in the database + let r = db.r_transaction().unwrap(); + let item2: Option = r.get().primary(1u32).unwrap(); + assert_eq!(item2, None); + + // Check if the item v2 is in the database + let r = db.r_transaction().unwrap(); + let item2: Item = r.get().primary(2u32).unwrap().unwrap(); + assert_eq!(item2, item2); + + // Check if length is 1 + let r = db.r_transaction().unwrap(); + let length = r.len().primary::().unwrap(); + assert_eq!(length, 1); +} diff --git a/tests/query/insert_update_sk.rs b/tests/query/insert_update_sk.rs new file mode 100644 index 00000000..cba727b7 --- /dev/null +++ b/tests/query/insert_update_sk.rs @@ -0,0 +1,78 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, 
+ #[secondary_key(unique)] + name: String, +} + +#[test] +fn insert_update_sk() { + let tf = TmpFs::new().unwrap(); + + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + // Insert the item + let rw = db.rw_transaction().unwrap(); + rw.insert(item.clone()).unwrap(); + rw.commit().unwrap(); + + // Check if the item is in the database by primary key + let r = db.r_transaction().unwrap(); + let item2: Item = r.get().primary(1u32).unwrap().unwrap(); + assert_eq!(item, item2); + + // Check if the item is in the database by secondary key + let r = db.r_transaction().unwrap(); + let item2: Item = r.get().secondary(ItemKey::name, "test").unwrap().unwrap(); + assert_eq!(item, item2); + + let item_v2 = Item { + id: 2, + name: "test2".to_string(), + }; + + // Update the item + let rw = db.rw_transaction().unwrap(); + rw.update(item.clone(), item_v2.clone()).unwrap(); + rw.commit().unwrap(); + + // Check if the item v1 is not in the database by primary key + let r = db.r_transaction().unwrap(); + let item2: Option = r.get().primary(1u32).unwrap(); + assert_eq!(item2, None); + + // Check if the item v1 is not in the database by secondary key + let r = db.r_transaction().unwrap(); + let item2: Option = r.get().secondary(ItemKey::name, "test").unwrap(); + assert_eq!(item2, None); + + // Check if the item v2 is in the database by primary key + let r = db.r_transaction().unwrap(); + let item2: Item = r.get().secondary(ItemKey::name, "test2").unwrap().unwrap(); + assert_eq!(item2, item_v2); + + // Check if the item v2 is in the database by secondary key + let r = db.r_transaction().unwrap(); + let item2: Item = r.get().primary(2u32).unwrap().unwrap(); + assert_eq!(item2, item_v2); + + // Check length is 1 + let r = db.r_transaction().unwrap(); + let length = r.len().primary::().unwrap(); + assert_eq!(length, 1); +} diff --git 
a/tests/query/mod.rs b/tests/query/mod.rs new file mode 100644 index 00000000..f348a2c5 --- /dev/null +++ b/tests/query/mod.rs @@ -0,0 +1,7 @@ +mod insert_get_pk; +mod insert_get_sk; +mod insert_len_pk; +mod insert_remove_pk; +mod insert_remove_sk; +mod insert_update_pk; +mod insert_update_sk; diff --git a/tests/scan.rs b/tests/scan.rs new file mode 100644 index 00000000..321e023c --- /dev/null +++ b/tests/scan.rs @@ -0,0 +1,586 @@ +// TODO: refactor and move to query/ folder + +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 1, version = 1)] +#[native_db( + primary_key(generate_my_primary_key), + secondary_key(secondary_key_1, unique), + secondary_key(secondary_key_2, unique) +)] +struct Item { + id: u32, + name: String, +} + +impl Item { + pub fn new(id: u32, name: &str) -> Self { + Self { + id, + name: name.to_string(), + } + } + pub fn generate_my_primary_key(&self) -> u32 { + self.id + } + pub fn secondary_key_1(&self) -> String { + format!("{}", self.id).into() + } + pub fn secondary_key_2(&self) -> String { + format!("{}", self.name) + } +} + +#[test] +fn test_iter() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(Item::new(1, "test")).unwrap(); + rw.insert(Item::new(2, "test2")).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let result: Vec = r.scan().primary().unwrap().iter().collect(); + assert_eq!(result.len(), 2); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 1); + assert_eq!(obj1.name, "test"); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.id, 2); + assert_eq!(obj2.name, "test2"); +} + +// Check if the use of BigEndian is correct +#[test] +fn 
test_iter_many_items_to_be_bytes() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + // Insert 1000 items + for i in 0..257 { + rw.insert(Item::new(i, format!("test_{}", i).as_str())) + .unwrap(); + } + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + { + let iter: Vec = r.scan().primary().unwrap().iter().collect(); + assert_eq!(iter.len(), 257); + + let obj1 = iter.get(0).unwrap(); + assert_eq!(obj1.id, 0); + assert_eq!(obj1.name, "test_0"); + + let obj2 = iter.get(1).unwrap(); + assert_eq!(obj2.id, 1); + assert_eq!(obj2.name, "test_1"); + + let obj3 = iter.get(256).unwrap(); + assert_eq!(obj3.id, 256); + assert_eq!(obj3.name, "test_256"); + } +} + +#[test] +fn test_double_ended_iter() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(Item::new(1, "test")).unwrap(); + rw.insert(Item::new(2, "test2")).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let scan = r.scan().primary().unwrap(); + let iter = scan.iter(); + let result: Vec = iter.rev().collect(); + + assert_eq!(result.len(), 2); + + let obj1 = result.get(0).unwrap(); + + assert_eq!(obj1.id, 2); + assert_eq!(obj1.name, "test2"); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.id, 1); + assert_eq!(obj2.name, "test"); +} + +#[test] +fn test_iter_range() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(Item::new(1, "test")).unwrap(); + rw.insert(Item::new(2, "test2")).unwrap(); + rw.insert(Item::new(3, "test3")).unwrap(); 
+ rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let result: Vec = r.scan().primary().unwrap().range(..2_i32).collect(); + assert_eq!(result.len(), 1); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 1); + assert_eq!(obj1.name, "test"); + + let result: Vec = r.scan().primary().unwrap().range(2_i32..).collect(); + assert_eq!(result.len(), 2); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 2); + assert_eq!(obj1.name, "test2"); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.id, 3); + assert_eq!(obj2.name, "test3"); + + let result: Vec = r.scan().primary().unwrap().range(2_i32..3_i32).collect(); + assert_eq!(result.len(), 1); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 2); + assert_eq!(obj1.name, "test2"); +} + +#[test] +fn test_iter_by_key() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(Item::new(1, "test")).unwrap(); + rw.insert(Item::new(2, "test2")).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let result: Vec = r + .scan() + .secondary(ItemKey::secondary_key_1) + .unwrap() + .all() + .collect(); + + assert_eq!(result.len(), 2); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 1); + assert_eq!(obj1.name, "test"); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.id, 2); + assert_eq!(obj2.name, "test2"); +} + +#[test] +fn test_double_ended_iter_by_key() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(Item::new(1, "test")).unwrap(); + rw.insert(Item::new(2, "test2")).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let scan = 
r.scan().secondary(ItemKey::secondary_key_1).unwrap(); + let iter = scan.all(); + let result: Vec = iter.rev().collect(); + + assert_eq!(result.len(), 2); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 2); + assert_eq!(obj1.name, "test2"); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.id, 1); + assert_eq!(obj2.name, "test"); +} + +#[test] +fn test_double_ended_iter_by_key_range() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(Item::new(1, "test")).unwrap(); + rw.insert(Item::new(2, "test2")).unwrap(); + rw.insert(Item::new(3, "test3")).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let scan = r.scan().secondary(ItemKey::secondary_key_1).unwrap(); + let iter = scan.range(..b"2".as_slice()); + let result: Vec = iter.rev().collect(); + + assert_eq!(result.len(), 1); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 1); + assert_eq!(obj1.name, "test"); + + let scan = r.scan().secondary(ItemKey::secondary_key_1).unwrap(); + let iter = scan.range(b"2".as_slice()..); + let result: Vec = iter.rev().collect(); + + assert_eq!(result.len(), 2); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 3); + assert_eq!(obj1.name, "test3"); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.id, 2); + assert_eq!(obj2.name, "test2"); + + let scan = r.scan().secondary(ItemKey::secondary_key_1).unwrap(); + let iter = scan.range(b"2".as_slice()..b"3".as_slice()); + let result: Vec = iter.rev().collect(); + + assert_eq!(result.len(), 1); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 2); + assert_eq!(obj1.name, "test2"); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 2, version = 1)] +#[native_db(primary_key(generate_my_primary_key))] +struct ItemFlag { + name: String, 
+} + +impl ItemFlag { + pub fn new(name: &str) -> Self { + Self { + name: name.to_string(), + } + } + + pub fn generate_my_primary_key(&self) -> String { + self.name.to_string() + } +} + +#[test] +fn test_start_with_scenario() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + // Red flag + rw.insert(ItemFlag::new("red:1")).unwrap(); + rw.insert(ItemFlag::new("red:2")).unwrap(); + rw.insert(ItemFlag::new("red:3")).unwrap(); + // Blue flag + rw.insert(ItemFlag::new("blue:1")).unwrap(); + rw.insert(ItemFlag::new("blue:2")).unwrap(); + rw.insert(ItemFlag::new("blue:3")).unwrap(); + // Green flag + rw.insert(ItemFlag::new("green:1")).unwrap(); + rw.insert(ItemFlag::new("green:2")).unwrap(); + rw.insert(ItemFlag::new("green:3")).unwrap(); + rw.commit().unwrap(); + + for p in ["red:", "blue:", "green:"] { + let r = db.r_transaction().unwrap(); + + let result = r + .scan() + .primary() + .unwrap() + .start_with(p.to_string().as_str()) + .collect::>(); + assert_eq!(result.len(), 3); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.name, format!("{}1", p)); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.name, format!("{}2", p)); + + let obj3 = result.get(2).unwrap(); + assert_eq!(obj3.name, format!("{}3", p)); + } +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 3, version = 1)] +#[native_db(primary_key(generate_my_primary_key), secondary_key(flag, unique))] +struct ItemIdFlag { + id: String, + flag: String, +} + +impl ItemIdFlag { + pub fn new(id: &str, name: &str) -> Self { + Self { + id: id.to_string(), + flag: name.to_string(), + } + } + + pub fn generate_my_primary_key(&self) -> String { + self.id.clone() + } + pub fn flag(&self) -> String { + format!("{}:{}", self.flag, self.id) + } +} + +#[test] +fn 
test_start_with_by_key_scenario_write_txn() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + + // Red flag + rw.insert(ItemIdFlag::new("1", "red")).unwrap(); + rw.insert(ItemIdFlag::new("2", "red")).unwrap(); + rw.insert(ItemIdFlag::new("3", "red")).unwrap(); + // Blue flag + rw.insert(ItemIdFlag::new("4", "blue")).unwrap(); + rw.insert(ItemIdFlag::new("5", "blue")).unwrap(); + rw.insert(ItemIdFlag::new("6", "blue")).unwrap(); + // Green flag + rw.insert(ItemIdFlag::new("7", "green")).unwrap(); + rw.insert(ItemIdFlag::new("8", "green")).unwrap(); + rw.insert(ItemIdFlag::new("9", "green")).unwrap(); + + rw.commit().unwrap(); + + for p in ["red:", "blue:", "green:"] { + let rw = db.rw_transaction().unwrap(); + + let result: Vec = rw + .scan() + .secondary(ItemIdFlagKey::flag) + .unwrap() + .start_with(p.to_string().as_str()) + .collect(); + assert_eq!(result.len(), 3); + + let obj1 = result.get(0).unwrap(); + assert_eq!(format!("{}:", obj1.flag), format!("{}", p)); + + let obj2 = result.get(1).unwrap(); + assert_eq!(format!("{}:", obj2.flag), format!("{}", p)); + + let obj3 = result.get(2).unwrap(); + assert_eq!(format!("{}:", obj3.flag), format!("{}", p)); + } +} + +#[test] +fn test_start_with_by_key_scenario_readonly_txn() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + // Red flag + rw.insert(ItemIdFlag::new("1", "red")).unwrap(); + rw.insert(ItemIdFlag::new("2", "red")).unwrap(); + rw.insert(ItemIdFlag::new("3", "red")).unwrap(); + // Blue flag + rw.insert(ItemIdFlag::new("4", "blue")).unwrap(); + rw.insert(ItemIdFlag::new("5", "blue")).unwrap(); + rw.insert(ItemIdFlag::new("6", "blue")).unwrap(); + // Green flag + 
rw.insert(ItemIdFlag::new("7", "green")).unwrap(); + rw.insert(ItemIdFlag::new("8", "green")).unwrap(); + rw.insert(ItemIdFlag::new("9", "green")).unwrap(); + rw.commit().unwrap(); + + for p in ["red:", "blue:", "green:"] { + let r = db.r_transaction().unwrap(); + let result: Vec = r + .scan() + .secondary(ItemIdFlagKey::flag) + .unwrap() + .start_with(p.to_string().as_str()) + .collect(); + assert_eq!(result.len(), 3); + + let obj1 = result.get(0).unwrap(); + assert_eq!(format!("{}:", obj1.flag), format!("{}", p)); + + let obj2 = result.get(1).unwrap(); + assert_eq!(format!("{}:", obj2.flag), format!("{}", p)); + + let obj3 = result.get(2).unwrap(); + assert_eq!(format!("{}:", obj3.flag), format!("{}", p)); + } +} + +#[test] +fn test_txn_write_iter() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(Item::new(1, "test")).unwrap(); + rw.insert(Item::new(2, "test2")).unwrap(); + rw.commit().unwrap(); + + let rw = db.rw_transaction().unwrap(); + let result: Vec = rw.scan().primary().unwrap().iter().collect(); + assert_eq!(result.len(), 2); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 1); + assert_eq!(obj1.name, "test"); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.id, 2); + assert_eq!(obj2.name, "test2"); +} + +#[test] +fn test_txn_write_iter_range() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(Item::new(1, "test")).unwrap(); + rw.insert(Item::new(2, "test2")).unwrap(); + rw.insert(Item::new(3, "test3")).unwrap(); + rw.commit().unwrap(); + + let rw = db.rw_transaction().unwrap(); + let result: Vec = rw + .scan() + .primary() + .unwrap() + 
.range(..2_i32.to_be_bytes().as_slice()) + .collect(); + assert_eq!(result.len(), 1); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 1); + assert_eq!(obj1.name, "test"); + + let result: Vec = rw + .scan() + .primary() + .unwrap() + .range(2_i32.to_be_bytes().as_slice()..) + .collect(); + assert_eq!(result.len(), 2); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 2); + assert_eq!(obj1.name, "test2"); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.id, 3); + assert_eq!(obj2.name, "test3"); + + let result: Vec = rw + .scan() + .primary() + .unwrap() + .range(2_i32.to_be_bytes().as_slice()..3_i32.to_be_bytes().as_slice()) + .collect(); + assert_eq!(result.len(), 1); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.id, 2); + assert_eq!(obj1.name, "test2"); +} + +#[test] +fn test_txn_write_start_with_scenario() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let rw = db.rw_transaction().unwrap(); + // Red flag + rw.insert(ItemFlag::new("red:1")).unwrap(); + rw.insert(ItemFlag::new("red:2")).unwrap(); + rw.insert(ItemFlag::new("red:3")).unwrap(); + // Blue flag + rw.insert(ItemFlag::new("blue:1")).unwrap(); + rw.insert(ItemFlag::new("blue:2")).unwrap(); + rw.insert(ItemFlag::new("blue:3")).unwrap(); + // Green flag + rw.insert(ItemFlag::new("green:1")).unwrap(); + rw.insert(ItemFlag::new("green:2")).unwrap(); + rw.insert(ItemFlag::new("green:3")).unwrap(); + rw.commit().unwrap(); + + for p in ["red:", "blue:", "green:"] { + let rw = db.rw_transaction().unwrap(); + + let result: Vec = rw + .scan() + .primary() + .unwrap() + .start_with(p.to_string().as_str()) + .collect(); + assert_eq!(result.len(), 3); + + let obj1 = result.get(0).unwrap(); + assert_eq!(obj1.name, format!("{}1", p)); + + let obj2 = result.get(1).unwrap(); + assert_eq!(obj2.name, format!("{}2", p)); + + let obj3 = 
result.get(2).unwrap(); + assert_eq!(obj3.name, format!("{}3", p)); + } +} diff --git a/tests/simple_multithreads.rs b/tests/simple_multithreads.rs new file mode 100644 index 00000000..b1be3ccc --- /dev/null +++ b/tests/simple_multithreads.rs @@ -0,0 +1,69 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; +use std::sync::Arc; +use std::thread; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, + name: String, +} + +#[test] +fn multi_threads() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let db = Arc::new(db); + + thread::scope(|s| { + let db_thread_1 = db.clone(); + let handle_thread_1 = s.spawn(move || { + let item_a = Item { + id: 1, + name: "a".to_string(), + }; + { + let rw = db_thread_1.rw_transaction().unwrap(); + rw.insert(item_a).unwrap(); + rw.commit().unwrap(); + } + }); + + let db_thread_2 = db.clone(); + let handle_thread_2 = s.spawn(move || { + let item_b = Item { + id: 2, + name: "b".to_string(), + }; + { + let rw = db_thread_2.rw_transaction().unwrap(); + rw.insert(item_b).unwrap(); + rw.commit().unwrap(); + } + }); + + handle_thread_1.join().unwrap(); + handle_thread_2.join().unwrap(); + }); + + { + let r = db.r_transaction().unwrap(); + let len = r.len().primary::().unwrap(); + assert_eq!(len, 2); + + let item_a: Item = r.get().primary(1u32).unwrap().unwrap(); + assert_eq!(item_a.name, "a".to_string()); + + let item_b: Item = r.get().primary(2u32).unwrap().unwrap(); + assert_eq!(item_b.name, "b".to_string()); + } +} diff --git a/tests/skeptic.rs b/tests/skeptic.rs index bc953aed..192413f7 100644 --- a/tests/skeptic.rs +++ b/tests/skeptic.rs @@ -1,8 +1,9 @@ #[cfg(not(any( - target_os = "android", - target_arch = "i686", - 
target_arch = "aarch64", - target_arch = "armv7", - target_arch = "thumbv7neon", - target_arch = "x86_64")))] + target_os = "android", + target_arch = "i686", + target_arch = "aarch64", + target_arch = "armv7", + target_arch = "thumbv7neon", +)))] + include!(concat!(env!("OUT_DIR"), "/skeptic-tests.rs")); diff --git a/tests/snapshot.rs b/tests/snapshot.rs new file mode 100644 index 00000000..55129f8b --- /dev/null +++ b/tests/snapshot.rs @@ -0,0 +1,15 @@ +use native_db::DatabaseBuilder; +use shortcut_assert_fs::TmpFs; + +#[test] +fn test_snapshot() { + let tf = TmpFs::new().unwrap(); + let builder = DatabaseBuilder::new(); + let db = builder.create_in_memory().unwrap(); + db.snapshot(&builder, tf.path("snapshot.db").as_std_path()) + .unwrap(); + + // TODO: Check the snapshot is correct + + tf.display_dir_entries(); +} diff --git a/tests/tests.rs b/tests/tests.rs deleted file mode 100644 index b6053c40..00000000 --- a/tests/tests.rs +++ /dev/null @@ -1,6 +0,0 @@ -use shortcut_assert_fs::TmpFs; - -#[allow(dead_code)] -pub fn init() -> TmpFs { - TmpFs::new().unwrap() -} \ No newline at end of file diff --git a/tests/transaction.rs b/tests/transaction.rs new file mode 100644 index 00000000..40ad6637 --- /dev/null +++ b/tests/transaction.rs @@ -0,0 +1,114 @@ +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; +use std::panic::AssertUnwindSafe; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct Item { + #[primary_key] + id: u32, + name: String, +} + +#[test] +fn test_transaction_obj_1() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item = Item { + id: 1, + name: "test".to_string(), + }; + + let rw = db.rw_transaction().unwrap(); + rw.insert(item).unwrap(); + rw.commit().unwrap(); 
+ + let r = db.r_transaction().unwrap(); + let result: Item = r.get().primary(1u32).unwrap().unwrap(); + assert_eq!(result.id, 1); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 2, version = 1)] +#[native_db] +struct Item2 { + #[primary_key] + id: u32, + name: String, +} + +#[test] +fn test_transaction_obj_1_and_obj_2() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::<Item>().unwrap(); + builder.define::<Item2>().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item_1 = Item { + id: 1, + name: "test".to_string(), + }; + let item_2 = Item2 { + id: 2, + name: "test".to_string(), + }; + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_1).unwrap(); + rw.insert(item_2).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let result: Item = r.get().primary(1u32).unwrap().unwrap(); + assert_eq!(result.id, 1); + let result: Item2 = r.get().primary(2u32).unwrap().unwrap(); + assert_eq!(result.id, 2); +} + +#[allow(unreachable_code)] +#[test] +fn test_transaction_fail() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::<Item>().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item_1 = Item { + id: 1, + name: "test".to_string(), + }; + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_1).unwrap(); + rw.commit().unwrap(); + + let r = db.r_transaction().unwrap(); + let result: Item = r.get().primary(1u32).unwrap().unwrap(); + assert_eq!(result.id, 1); + + let item_2 = Item { + id: 2, + name: "test".to_string(), + }; + let result = std::panic::catch_unwind(AssertUnwindSafe(|| { + let rw = db.rw_transaction().unwrap(); + rw.insert(item_2).unwrap(); + panic!("Random panic here...") + })); + + assert!(result.is_err()); + + let r = db.r_transaction().unwrap(); + let result: Option<Item> = r.get().primary(2u32).unwrap(); + assert!(result.is_none()); +} 
diff --git a/tests/util.rs b/tests/util.rs new file mode 100644 index 00000000..f75c76cd --- /dev/null +++ b/tests/util.rs @@ -0,0 +1,40 @@ +use native_db::*; +use shortcut_assert_fs::TmpFs; + +#[test] +fn test_builder() { + let tf = TmpFs::new().unwrap(); + // Create without error + let mut _db = DatabaseBuilder::new().create(&tf.path("test")).unwrap(); +} + +#[test] +fn test_builder_with_set_cache_size() { + let tf = TmpFs::new().unwrap(); + // Create without error + let mut builder = DatabaseBuilder::new(); + let _db = builder + .set_cache_size(100) + .create(&tf.path("test")) + .unwrap(); +} + +#[test] +fn test_open_unexisting_database() { + let tf = TmpFs::new().unwrap(); + // Open an unexisting database + assert!(DatabaseBuilder::new().open(&tf.path("test")).is_err()); +} + +#[test] +fn test_open_existing_database() { + let tf = TmpFs::new().unwrap(); + + // Create a database + let builder = DatabaseBuilder::new(); + let db = builder.create(&tf.path("test")).unwrap(); + drop(db); + + // Open an existing database + let _db = DatabaseBuilder::new().open(&tf.path("test")).unwrap(); +} diff --git a/tests/watch/mod.rs b/tests/watch/mod.rs new file mode 100644 index 00000000..234d2f6e --- /dev/null +++ b/tests/watch/mod.rs @@ -0,0 +1,456 @@ +#![cfg(not(feature = "tokio"))] + +mod watch_optional; + +use native_db::watch::Event; +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; +use std::sync::Arc; +use std::thread; +use std::time::Duration; + +pub const TIMEOUT: Duration = Duration::from_secs(1); + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct ItemA { + #[primary_key] + id: u32, +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 2, version = 1)] +#[native_db] +struct ItemB { + #[primary_key] + id: u32, +} + +#[test] +fn watch_one_primary_key() { + let tf = 
TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item_a = ItemA { id: 1 }; + + let (recv, _) = db.watch().get().primary::(item_a.id).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_a.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..1 { + let inner_event: ItemA = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert_eq!(inner_event, item_a); + } + assert!(recv.try_recv().is_err()); +} + +#[test] +fn watch_all_primary_key() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item_a_1 = ItemA { id: 1 }; + let item_a_2 = ItemA { id: 2 }; + + let (recv, _) = db.watch().scan().primary().all::().unwrap(); + let rw = db.rw_transaction().unwrap(); + rw.insert(item_a_1.clone()).unwrap(); + rw.insert(item_a_2.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..2 { + let inner_event: ItemA = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert!(inner_event == item_a_1 || inner_event == item_a_2); + } + assert!(recv.try_recv().is_err()); +} + +#[test] +fn watch_multithreading() { + let tf = TmpFs::new().unwrap(); + + let mut builder: DatabaseBuilder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + let db = Arc::new(db); + let dba = Arc::clone(&db); + + let item_a = ItemA { id: 1 }; + let (recv, _) = dba.watch().get().primary::(item_a.id).unwrap(); + + thread::scope(|s| { + let handle = s.spawn(move || { + let item_a = ItemA { id: 1 }; + let (recv, _) = dba.watch().get().primary::(item_a.id).unwrap(); + let rw = dba.rw_transaction().unwrap(); + { + let 
item_a = ItemA { id: 1 }; + rw.insert(item_a.clone()).unwrap(); + } + rw.commit().unwrap(); + for _ in 0..1 { + let inner_event: ItemA = + if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert_eq!(inner_event, item_a); + } + }); + + let dbb = Arc::clone(&db); + let rw = dbb.rw_transaction().unwrap(); + { + let item_a = ItemA { id: 1 }; + rw.insert(item_a.clone()).unwrap(); + } + rw.commit().unwrap(); + + handle.join().unwrap(); + for _ in 0..2 { + let inner_event: ItemA = + if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert_eq!(inner_event, item_a); + } + assert!(recv.try_recv().is_err()); + }); +} + +#[test] +fn watch_outside() { + let tf = TmpFs::new().unwrap(); + + let item_a = ItemA { id: 1 }; + let item_b_1 = ItemB { id: 1 }; + let item_b_2 = ItemB { id: 2 }; + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let (recv, _) = db.watch().get().primary::(item_b_1.id).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_a.clone()).unwrap(); + rw.insert(item_b_1.clone()).unwrap(); + rw.insert(item_b_2.clone()).unwrap(); + rw.commit().unwrap(); + + // Check that recv receives only 1 insert event + let inner_event: ItemB = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert_eq!(inner_event, item_b_1); + assert!(recv.try_recv().is_err()); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 3, version = 1)] +#[native_db] +struct ItemA1K { + #[primary_key] + id: u32, + #[secondary_key(unique)] + name: String, +} + +#[test] +fn watch_one_secondary_key() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + 
builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let a = ItemA1K { + id: 1, + name: "a".to_string(), + }; + + let (recv, _) = db + .watch() + .get() + .secondary::(ItemA1KKey::name, a.name.clone()) + .unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(a.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..1 { + let inner_event: ItemA1K = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() + { + event.inner() + } else { + panic!("wrong event") + }; + assert_eq!(inner_event, a); + } + assert!(recv.try_recv().is_err()); +} + +#[test] +fn watch_all_secondary_keys() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let a1 = ItemA1K { + id: 1, + name: "a".to_string(), + }; + let a2 = ItemA1K { + id: 2, + name: "b".to_string(), + }; + + let (recv, _) = db + .watch() + .scan() + .secondary(ItemA1KKey::name) + .all::() + .unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(a1.clone()).unwrap(); + rw.insert(a2.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..2 { + let inner_event: ItemA1K = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() + { + event.inner() + } else { + panic!("wrong event") + }; + assert!(inner_event == a1 || inner_event == a2); + } + assert!(recv.try_recv().is_err()); +} + +#[test] +fn unwatch() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item_a = ItemA { id: 1 }; + + let (recv, recv_id) = db.watch().get().primary::(item_a.id).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_a.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..1 { + let inner_event: ItemA = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { + 
event.inner() + } else { + panic!("wrong event") + }; + assert_eq!(inner_event, item_a); + } + + db.unwatch(recv_id).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_a.clone()).unwrap(); + rw.commit().unwrap(); + assert!(recv.try_recv().is_err()); +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 4, version = 1)] +#[native_db] +struct ItemC { + #[primary_key] + name: String, +} + +#[test] +fn watch_start_with() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let c1 = ItemC { + name: "a_1".to_string(), + }; + let c2 = ItemC { + name: "a_2".to_string(), + }; + let c3 = ItemC { + name: "b_1".to_string(), + }; + + let (recv, _) = db + .watch() + .scan() + .primary() + .start_with::("a") + .unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(c1.clone()).unwrap(); + rw.insert(c2.clone()).unwrap(); + rw.insert(c3.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..2 { + let inner_event: ItemC = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert!(inner_event == c1 || inner_event == c2); + } + assert!(recv.try_recv().is_err()); +} + +#[test] +fn watch_start_with_by_key() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item_a_1_k = ItemA1K { + id: 1, + name: "a_1".to_string(), + }; + let item_a_2_k = ItemA1K { + id: 2, + name: "a_2".to_string(), + }; + let item_a_3_k = ItemA1K { + id: 3, + name: "b_1".to_string(), + }; + + let (recv, _) = db + .watch() + .scan() + .secondary(ItemA1KKey::name) + .start_with::("a") + .unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_a_1_k.clone()).unwrap(); + 
rw.insert(item_a_2_k.clone()).unwrap(); + rw.insert(item_a_3_k.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..2 { + let inner_event: ItemA1K = if let Event::Insert(event) = recv.recv_timeout(TIMEOUT).unwrap() + { + event.inner() + } else { + panic!("wrong event") + }; + assert!(inner_event == item_a_1_k || inner_event == item_a_2_k); + } + assert!(recv.try_recv().is_err()); +} + +#[test] +fn watch_all_delete() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item_a = ItemA { id: 1 }; + + let (recv, _) = db.watch().scan().primary().all::().unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_a.clone()).unwrap(); + rw.commit().unwrap(); + + recv.recv_timeout(TIMEOUT).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.remove(item_a.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..1 { + let r_a: ItemA = if let Event::Delete(event) = recv.recv_timeout(TIMEOUT).unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert_eq!(r_a, item_a); + } + assert!(recv.try_recv().is_err()); +} + +#[test] +fn watch_all_update() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item_a_1 = ItemA { id: 1 }; + let item_a_2 = ItemA { id: 2 }; + + let (recv, _) = db.watch().scan().primary().all::().unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_a_1.clone()).unwrap(); + rw.commit().unwrap(); + + recv.recv_timeout(TIMEOUT).unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.update(item_a_1.clone(), item_a_2.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..1 { + let (old_r_a, new_r_a): (ItemA, ItemA) = + if let Event::Update(event) = recv.recv_timeout(TIMEOUT).unwrap() { + (event.inner_old(), event.inner_new()) + } else { 
+ panic!("wrong event") + }; + assert_eq!(old_r_a, item_a_1); + assert_eq!(new_r_a, item_a_2); + } + assert!(recv.try_recv().is_err()); +} diff --git a/tests/watch/watch_optional.rs b/tests/watch/watch_optional.rs new file mode 100644 index 00000000..91ae5096 --- /dev/null +++ b/tests/watch/watch_optional.rs @@ -0,0 +1,127 @@ +use native_db::watch::Event; +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct ItemAOptional { + #[primary_key] + id: u32, + #[secondary_key(unique, optional)] + name: Option, +} + +#[test] +fn watch_one_secondary_key_some() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let a = ItemAOptional { + id: 1, + name: Some("a".to_string()), + }; + + let (recv, _) = db + .watch() + .get() + .secondary::(ItemAOptionalKey::name, "a") + .unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(a.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..1 { + let inner_event: ItemAOptional = + if let Event::Insert(event) = recv.recv_timeout(super::TIMEOUT).unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert_eq!(inner_event, a); + } + assert!(recv.try_recv().is_err()); +} + +#[test] +fn watch_one_secondary_key_none() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let a = ItemAOptional { id: 1, name: None }; + + let (recv, _) = db + .watch() + .get() + .secondary::(ItemAOptionalKey::name, "a") + .unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(a.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..1 { + let result = 
recv.recv_timeout(super::TIMEOUT); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + std::sync::mpsc::RecvTimeoutError::Timeout + )); + } + assert!(recv.try_recv().is_err()); +} + +#[test] +fn watch_start_with_by_key() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let db = builder.create(tf.path("test").as_std_path()).unwrap(); + + let item_a_1_k = ItemAOptional { + id: 1, + name: Some("a_1".to_string()), + }; + let item_a_2_k = ItemAOptional { + id: 2, + name: Some("a_2".to_string()), + }; + let item_a_3_k = ItemAOptional { + id: 3, + name: Some("b_1".to_string()), + }; + + let (recv, _) = db + .watch() + .scan() + .secondary(ItemAOptionalKey::name) + .start_with::("a") + .unwrap(); + + let rw = db.rw_transaction().unwrap(); + rw.insert(item_a_1_k.clone()).unwrap(); + rw.insert(item_a_2_k.clone()).unwrap(); + rw.insert(item_a_3_k.clone()).unwrap(); + rw.commit().unwrap(); + + for _ in 0..2 { + let inner_event: ItemAOptional = + if let Event::Insert(event) = recv.recv_timeout(super::TIMEOUT).unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert!(inner_event == item_a_1_k || inner_event == item_a_2_k); + } + assert!(recv.try_recv().is_err()); +} diff --git a/tests/watch_tokio.rs b/tests/watch_tokio.rs new file mode 100644 index 00000000..52eb24ff --- /dev/null +++ b/tests/watch_tokio.rs @@ -0,0 +1,44 @@ +#![cfg(feature = "tokio")] + +use native_db::watch::Event; +use native_db::*; +use native_model::{native_model, Model}; +use serde::{Deserialize, Serialize}; +use shortcut_assert_fs::TmpFs; + +#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] +#[native_model(id = 1, version = 1)] +#[native_db] +struct ItemA { + #[primary_key] + id: u32, +} + +#[tokio::test] +async fn watch_one_primary_key() { + let tf = TmpFs::new().unwrap(); + + let mut builder = DatabaseBuilder::new(); + builder.define::().unwrap(); + let mut db = 
builder.create(tf.path("test").as_std_path()).unwrap(); + + let a = ItemA { id: 1 }; + + let (mut recv, _) = db.watch().get().primary::(a.id).unwrap(); + + let tx = db.rw_transaction().unwrap(); + tx.insert(a.clone()).unwrap(); + tx.commit().unwrap(); + + for _ in 0..1 { + let inner_event: ItemA = if let Event::Insert(event) = recv.recv().await.unwrap() { + event.inner() + } else { + panic!("wrong event") + }; + assert_eq!(inner_event, a); + } + assert!(recv.try_recv().is_err()); +} + +// TODO: maybe do others tests but it should the same as a std::sync::mpsc::channel. diff --git a/version_update.sh b/version_update.sh index f981b490..43adb449 100755 --- a/version_update.sh +++ b/version_update.sh @@ -2,7 +2,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -# Bash script to update version for struct_db and struct_db_macro +# Bash script to update version for native_db and native_db_macro # Semantic release version obtained from argument NEW_VERSION=$1 @@ -14,19 +14,21 @@ if [ -z "$NEW_VERSION" ]; then fi # Directories containing Cargo.toml files to update -declare -a directories=("." "struct_db_macro") +declare -a directories=("." "native_db_macro") for directory in "${directories[@]}" do # Check if Cargo.toml exists in the directory if [ -f "$directory/Cargo.toml" ]; then echo "Updating version in $directory/Cargo.toml to $NEW_VERSION" - # Use sed to find and replace the version string + # Use sed to find and replace the version string in the Cargo.toml sed -i -E "s/^version = \"[0-9]+\.[0-9]+\.[0-9]+\"/version = \"$NEW_VERSION\"/g" "$directory/Cargo.toml" + # Use sed to find and replace the version string in the README.md + sed -i -E "s/native_db = \"[0-9]+\.[0-9]+\.[0-9]+\"/native_db = \"$NEW_VERSION\"/g" "$directory/README.md" - # Update the dependency version for struct_db_macro in struct_db's Cargo.toml + # Update the dependency version for native_db_macro in native_db's Cargo.toml if [ "$directory" == "." 
]; then - sed -i -E "s/struct_db_macro = \{ version = \"[0-9]+\.[0-9]+\.[0-9]+\", path = \"struct_db_macro\" \}/struct_db_macro = { version = \"$NEW_VERSION\", path = \"struct_db_macro\" }/g" "$directory/Cargo.toml" + sed -i -E "s/native_db_macro = \{ version = \"[0-9]+\.[0-9]+\.[0-9]+\", path = \"native_db_macro\" \}/native_db_macro = { version = \"$NEW_VERSION\", path = \"native_db_macro\" }/g" "$directory/Cargo.toml" fi fi done