From d0124bdcf0d082263c75d6108fc7f1f39dcf4f09 Mon Sep 17 00:00:00 2001 From: wasm-forge <122647775+wasm-forge@users.noreply.github.com> Date: Sun, 13 Oct 2024 13:57:13 +0200 Subject: [PATCH 1/3] add metadata cache, improved overwriting in short segments, refactor read/write iterator --- CHANGELOG.md | 14 ++- Cargo.lock | 48 ++++---- Cargo.toml | 10 +- src/filename_cache.rs | 3 +- src/fs.rs | 24 ++-- src/runtime/structure_helpers.rs | 9 +- src/storage.rs | 1 + src/storage/chunk_iterator.rs | 76 +++++-------- src/storage/metadata_cache.rs | 167 ++++++++++++++++++++++++++++ src/storage/ptr_cache.rs | 112 ++++++++++++++++--- src/storage/stable.rs | 170 +++++++++++++++++------------ tests/a | 7 ++ tests/b.sh | 8 ++ tests/canister_initial/Cargo.lock | 10 +- tests/canister_upgraded/Cargo.lock | 10 +- tests/fsperf.repl | 84 ++++++++++++++ 16 files changed, 554 insertions(+), 199 deletions(-) create mode 100644 src/storage/metadata_cache.rs create mode 100755 tests/a create mode 100755 tests/b.sh create mode 100755 tests/fsperf.repl diff --git a/CHANGELOG.md b/CHANGELOG.md index 1504f3c..f6ef700 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,12 @@ # Changelog -## [unreleased] - -- Upgrade Pocket-ic client version to V5.0 -- Filename cache added (faster repeated opening of the same file) +## [v0.6.4] +- Upgrade Pocket-ic client version to V5.0. +- Filename cache added (faster repeated opening of the same file). +- Refactor v2 chunks reading and writing (reuse the same iteration mechanism). +- Metadata cache for regular files (faster overwriting in small segments). +- Dependency updates to the latest versions. ## [v0.6.3] @@ -30,7 +32,6 @@ - add V2 chunks support. - *API change:* mounted memory file support. - ## [v0.5.1] - use newer ic-cdk version. @@ -42,7 +43,8 @@ - *API change:* init with memory manager using memory index range rather than first memory index. 
-[unreleased]: https://github.com/wasm-forge/stable-fs/compare/v0.6.3...main +[unreleased]: https://github.com/wasm-forge/stable-fs/compare/v0.6.4...main +[v0.6.4]: https://github.com/wasm-forge/stable-fs/compare/v0.6.3...v0.6.4 [v0.6.3]: https://github.com/wasm-forge/stable-fs/compare/v0.6.2...v0.6.3 [v0.6.2]: https://github.com/wasm-forge/stable-fs/compare/v0.6.1...v0.6.2 [v0.6.1]: https://github.com/wasm-forge/stable-fs/compare/v0.6.0...v0.6.1 diff --git a/Cargo.lock b/Cargo.lock index 120f402..40f6c16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -108,9 +108,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "block-buffer" @@ -141,9 +141,9 @@ checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "candid" -version = "0.10.8" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd5902d37352dffd8bd9177a2daa6444ce3cd0279c91763fb0171c053aa04335" +checksum = "6c30ee7f886f296b6422c0ff017e89dd4f831521dfdcc76f3f71aae1ce817222" dependencies = [ "anyhow", "binread", @@ -191,9 +191,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -202,15 +202,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -265,6 +265,12 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + [[package]] name = "crypto-common" version = "0.1.6" @@ -452,9 +458,13 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] [[package]] name = "hashbrown" @@ -1071,7 +1081,7 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 
2.4.1", + "bitflags 2.6.0", ] [[package]] @@ -1323,9 +1333,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -1341,9 +1351,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -1479,9 +1489,9 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "stable-fs" -version = "0.6.3" +version = "0.6.4" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "candid", "ciborium", "ic-cdk 0.16.0", diff --git a/Cargo.toml b/Cargo.toml index bc3d437..bf551ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "stable-fs" -version = "0.6.3" +version = "0.6.4" edition = "2021" description = "A Simple File system implementing WASI endpoints and using the stable structures of the Internet Computer" keywords = ["ic", "internet-computer", "file-system"] @@ -8,13 +8,13 @@ license = "MIT" repository = "https://github.com/wasm-forge/stable-fs" [dependencies] -bitflags = "2.3.1" +bitflags = "2.6.0" ic-cdk = "0.16" ic-stable-structures = "0.6.5" -serde = "1.0.164" +serde = "1.0.210" serde_bytes = "0.11" -ciborium = "0.2.1" +ciborium = "0.2.2" [dev-dependencies] -candid = "0.10.8" +candid = "0.10.10" pocket-ic = "5.0.0" diff --git a/src/filename_cache.rs b/src/filename_cache.rs index 970aa2b..845fab5 100644 --- a/src/filename_cache.rs +++ b/src/filename_cache.rs @@ -18,11 +18,10 @@ impl FilenameCache { // add new cache pointer pub fn add(&mut self, key: (Node, String), value: Node) { - if self.nodes.len() + 1 > CACHE_CAPACITY { self.clear(); } - + self.nodes.insert(key, value); } diff --git a/src/fs.rs b/src/fs.rs index aeefcee..9f7b40f 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -400,7 +400,7 @@ impl FileSystem { if !flags.contains(OpenFlags::CREATE) { return Err(Error::NotFound); } - + if flags.contains(OpenFlags::DIRECTORY) { return Err(Error::InvalidFileType); } @@ -1327,8 +1327,6 @@ mod tests { let content = format!("{i}"); let times = SIZE_OF_FILE / content.len(); - println!("Writing to {filename}"); - write_text_file(&mut fs, root_fd, filename.as_str(), content.as_str(), times) .unwrap(); } @@ -1338,8 +1336,6 @@ mod tests { let filename = format!("{}/my_file_{}.txt", dir_name, i); let expected_content = format!("{i}{i}{i}"); - println!("Reading {}", filename); - let text_read = read_text_file( &mut fs, root_fd, @@ -1589,14 +1585,12 @@ mod tests { let content = read_text_file(&mut fs, root_fd, filename, 0, 100); assert_eq!(content, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); - println!("{:?}", content); write_text_at_offset(&mut fs, fd, "abc", 3, 3).unwrap(); let content = read_text_file(&mut fs, root_fd, filename, 1, 100); assert_eq!(content, "\0\0abcabcabc\0\0\0"); - println!("{:?}", content); } } @@ -1613,14 +1607,18 @@ mod tests { let content = read_text_file(&mut fs, root_fd, filename, 0, chunk_size * 10); let vec = vec![0; chunk_size * 2 + 500]; - let expected = String::from_utf8(vec).unwrap(); + // expect all zeroes at first 
assert_eq!(content, expected); + // write some text to the first chunk write_text_at_offset(&mut fs, fd, "abc", 33, 3).unwrap(); + + // write some text to the 100th position of the third chunk write_text_at_offset(&mut fs, fd, "abc", 33, chunk_size as FileSize * 2 + 100).unwrap(); + // read what we have now let content = read_text_file(&mut fs, root_fd, filename, 0, chunk_size * 10); let mut expected = vec![0u8; chunk_size * 2 + 500]; @@ -1668,13 +1666,11 @@ mod tests { } } - #[test] fn filename_cached_on_open_or_create() { let filename = "test.txt"; for mut fs in test_fs_setups("") { - let root_fd = fs.root_fd(); let fd = fs .open_or_create(root_fd, filename, FdStat::default(), OpenFlags::CREATE, 12) @@ -1683,7 +1679,6 @@ mod tests { fs.close(fd).unwrap(); assert_eq!(fs.names_cache.get_nodes().len(), 1); - } } @@ -1692,7 +1687,6 @@ mod tests { let filename = "test.txt"; for mut fs in test_fs_setups("") { - let root_fd = fs.root_fd(); let fd = fs .open_or_create(root_fd, filename, FdStat::default(), OpenFlags::CREATE, 12) @@ -1700,12 +1694,12 @@ mod tests { fs.close(fd).unwrap(); fs.remove_file(root_fd, filename).unwrap(); - + assert_eq!(fs.names_cache.get_nodes().len(), 0); // check we don't increase cache when the file is opened but not created - let fd2 = fs - .open_or_create(root_fd, filename, FdStat::default(), OpenFlags::empty(), 12); + let fd2 = + fs.open_or_create(root_fd, filename, FdStat::default(), OpenFlags::empty(), 12); assert_eq!(fd2, Err(Error::NotFound)); assert_eq!(fs.names_cache.get_nodes().len(), 0); diff --git a/src/runtime/structure_helpers.rs b/src/runtime/structure_helpers.rs index 10cc954..8fe151f 100644 --- a/src/runtime/structure_helpers.rs +++ b/src/runtime/structure_helpers.rs @@ -80,13 +80,12 @@ pub fn find_node( match find_result { Ok(result) => { - names_cache.add(key, result.node); - return Ok(result.node); - }, + Ok(result.node) + } Err(e) => { - return Err(e); + Err(e) } } } @@ -473,8 +472,10 @@ pub fn get_chunk_infos(start: FileSize, end: FileSize, chunk_size: usize) -> Vec let mut result = vec![]; let start_index = offset_to_file_chunk_index(start, chunk_size); let end_index = offset_to_file_chunk_index(end, chunk_size); + for index in start_index..=end_index { let start_of_chunk = file_chunk_index_to_offset(index, chunk_size); + assert!(start_of_chunk <= end); let start_in_chunk = start_of_chunk.max(start) - start_of_chunk; let end_in_chunk = (start_of_chunk + chunk_size as FileSize).min(end) - start_of_chunk; diff --git a/src/storage.rs b/src/storage.rs index 300094d..9ed2f0f 100644 --- a/src/storage.rs +++ b/src/storage.rs @@ -12,6 +12,7 @@ mod chunk_iterator; pub mod dummy; mod journal; mod ptr_cache; +mod metadata_cache; pub mod stable; pub mod transient; pub mod types; diff --git a/src/storage/chunk_iterator.rs b/src/storage/chunk_iterator.rs index 20d45f8..7ce3b9f 100644 --- a/src/storage/chunk_iterator.rs +++ b/src/storage/chunk_iterator.rs @@ -7,15 +7,13 @@ use ic_stable_structures::memory_manager::VirtualMemory; use ic_stable_structures::BTreeMap; use ic_stable_structures::Memory; +use super::ptr_cache::CachedChunkPtr; use super::ptr_cache::PtrCache; pub(crate) struct ChunkV2Iterator<'a, M: Memory> { node: Node, last_index_excluded: FileChunkIndex, cur_index: FileChunkIndex, - - is_prefetched: bool, - ptr_cache: &'a mut PtrCache, v2_chunk_ptr: &'a mut BTreeMap<(Node, FileChunkIndex), FileChunkPtr, VirtualMemory>, } @@ -24,30 +22,26 @@ impl<'a, M: Memory> ChunkV2Iterator<'a, M> { pub fn new( node: Node, offset: FileSize, - file_size: FileSize, 
+ last_address: FileSize, chunk_size: FileSize, ptr_cache: &'a mut PtrCache, v2_chunk_ptr: &'a mut BTreeMap<(Node, FileChunkIndex), FileChunkPtr, VirtualMemory>, ) -> Self { let cur_index = (offset / chunk_size) as FileChunkIndex; - let last_index_excluded = (file_size / chunk_size + 1) as FileChunkIndex; + let last_index_excluded = (last_address / chunk_size + 1) as FileChunkIndex; Self { node, last_index_excluded, cur_index, - is_prefetched: false, ptr_cache, v2_chunk_ptr, } } } -// for short reads it is better to cache some more chunks than the minimum required -const EXTRA_CACHE: u32 = 20; - impl<'a, M: Memory> Iterator for ChunkV2Iterator<'a, M> { - type Item = ((Node, FileChunkIndex), Option); + type Item = ((Node, FileChunkIndex), CachedChunkPtr); fn next(&mut self) -> Option { // we are at the end of the list, return None @@ -58,29 +52,22 @@ impl<'a, M: Memory> Iterator for ChunkV2Iterator<'a, M> { // try get cached item first let ptr = self.ptr_cache.get((self.node, self.cur_index)); - if ptr.is_some() { + if let Some(chunk_ptr) = ptr { + let ret = Some(((self.node, self.cur_index), chunk_ptr)); self.cur_index += 1; - return Some(((self.node, self.cur_index), ptr)); + return ret; } // cache failed, resort to reading the ranged values from the iterator - if !self.is_prefetched { - let range = - (self.node, self.cur_index)..(self.node, self.last_index_excluded + EXTRA_CACHE); - let items = self.v2_chunk_ptr.range(range); - - let mut new_cache = - Vec::with_capacity(self.last_index_excluded as usize - self.cur_index as usize); - for item in items { - new_cache.push(item); - } - - self.ptr_cache.add(new_cache); - - self.is_prefetched = true; - } + self.ptr_cache.add_range( + self.node, + self.cur_index, + self.last_index_excluded, + self.v2_chunk_ptr, + ); - let found: Option = self.ptr_cache.get((self.node, self.cur_index)); + // + let found: CachedChunkPtr = self.ptr_cache.get((self.node, self.cur_index)).unwrap(); let res = Some(((self.node, self.cur_index), found)); @@ -94,6 +81,7 @@ impl<'a, M: Memory> Iterator for ChunkV2Iterator<'a, M> { mod tests { use crate::fs::FileSize; use crate::storage::chunk_iterator::ChunkV2Iterator; + use crate::storage::ptr_cache::CachedChunkPtr; use crate::storage::stable::StableStorage; use crate::storage::types::{FileType, Metadata, Node, Times}; use crate::storage::Storage; @@ -143,11 +131,9 @@ mod tests { let res_vec: Vec<_> = iterator.collect(); - assert!(res_vec[0].1.is_some()); - assert!(res_vec[1].1.is_some()); - assert!(res_vec[2].1.is_some()); - - println!("{:?}", res_vec); + assert!(res_vec[0].1 != CachedChunkPtr::ChunkMissing); + assert!(res_vec[1].1 != CachedChunkPtr::ChunkMissing); + assert!(res_vec[2].1 != CachedChunkPtr::ChunkMissing); } #[test] @@ -171,11 +157,9 @@ mod tests { let res_vec: Vec<_> = iterator.collect(); - assert!(res_vec[0].1.is_none()); - assert!(res_vec[1].1.is_none()); - assert!(res_vec[2].1.is_none()); - - println!("{:?}", res_vec); + assert!(res_vec[0].1 == CachedChunkPtr::ChunkMissing); + assert!(res_vec[1].1 == CachedChunkPtr::ChunkMissing); + assert!(res_vec[2].1 == CachedChunkPtr::ChunkMissing); } #[test] @@ -202,11 +186,9 @@ mod tests { let res_vec: Vec<_> = iterator.collect(); - println!("{:?}", res_vec); - - assert!(res_vec[0].1.is_some()); - assert!(res_vec[1].1.is_none()); - assert!(res_vec[2].1.is_some()); + assert!(res_vec[0].1 != CachedChunkPtr::ChunkMissing); + assert!(res_vec[1].1 == CachedChunkPtr::ChunkMissing); + assert!(res_vec[2].1 != CachedChunkPtr::ChunkMissing); } #[test] @@ -233,10 +215,8 
@@ mod tests { let res_vec: Vec<_> = iterator.collect(); - println!("{:?}", res_vec); - - assert!(res_vec[0].1.is_none()); - assert!(res_vec[1].1.is_some()); - assert!(res_vec[2].1.is_none()); + assert!(res_vec[0].1 == CachedChunkPtr::ChunkMissing); + assert!(res_vec[1].1 != CachedChunkPtr::ChunkMissing); + assert!(res_vec[2].1 == CachedChunkPtr::ChunkMissing); } } diff --git a/src/storage/metadata_cache.rs b/src/storage/metadata_cache.rs new file mode 100644 index 0000000..770f060 --- /dev/null +++ b/src/storage/metadata_cache.rs @@ -0,0 +1,167 @@ +use std::collections::HashMap; + +use super::types::{Metadata, Node}; +use std::cell::RefCell; +use std::rc::Rc; + +const CACHE_CAPACITY: usize = 100; + +#[derive(Debug)] +pub(crate) struct MetadataCache { + meta: Rc>>, +} + +impl MetadataCache { + pub fn new() -> MetadataCache { + let meta: Rc>> = Rc::new(RefCell::new(HashMap::with_capacity(CACHE_CAPACITY))); + + MetadataCache { meta } + } + + // add new cache meta + pub fn update(&self, node: Node, new_meta: &Metadata) { + + let mut meta = (*self.meta).borrow_mut(); + + if meta.len() + 1 > CACHE_CAPACITY { + meta.clear(); + } + + meta.insert(node, new_meta.clone()); + } + + // clear cache completely + pub fn clear(&self) { + let mut meta = (*self.meta).borrow_mut(); + + meta.clear(); + } + + pub fn get(&self, node: Node) -> std::option::Option { + let meta = (*self.meta).borrow(); + + meta.get(&node).cloned() + } +} + +#[cfg(test)] +mod tests { + use crate::{fs::ChunkType, storage::types::{FileType, Times}}; + + use super::*; + + #[test] + fn cache_initialization() { + let cache = MetadataCache::new(); + assert_eq!(cache.meta.borrow().len(), 0, "Cache should be empty on initialization"); + } + + #[test] + fn cache_update_and_get() { + let cache = MetadataCache::new(); + let node = 1 as Node; + let metadata = Metadata { + node, + file_type: FileType::RegularFile, + link_count: 1, + size: 45, + times: Times { + accessed: 0, + modified: 0, + created: 0, + }, + first_dir_entry: None, + last_dir_entry: None, + chunk_type: Some(ChunkType::V2), + }; + + cache.update(node, &metadata); + + // Check if the metadata can be retrieved correctly + let retrieved = cache.get(node); + assert_eq!(retrieved, Some(metadata), "Retrieved metadata should match inserted metadata"); + } + + #[test] + fn cache_clear() { + let cache = MetadataCache::new(); + let node = 1 as Node; + + let metadata = Metadata { + node, + file_type: FileType::RegularFile, + link_count: 1, + size: 45, + times: Times { + accessed: 0, + modified: 0, + created: 0, + }, + first_dir_entry: None, + last_dir_entry: None, + chunk_type: Some(ChunkType::V2), + }; + + cache.update(node, &metadata); + cache.clear(); + + // Cache should be empty after clear + assert_eq!(cache.meta.borrow().len(), 0, "Cache should be empty after clearing"); + assert_eq!(cache.get(node), None, "Metadata should be None after cache is cleared"); + } + + #[test] + fn cache_eviction_when_capacity_exceeded() { + let cache = MetadataCache::new(); + + // Fill the cache to its capacity + for i in 0..CACHE_CAPACITY { + let node = i as Node; + + let metadata = Metadata { + node, + file_type: FileType::RegularFile, + link_count: 1, + size: 45, + times: Times { + accessed: 0, + modified: 0, + created: 0, + }, + first_dir_entry: None, + last_dir_entry: None, + chunk_type: Some(ChunkType::V2), + }; + + cache.update(node, &metadata); + } + + assert_eq!(cache.meta.borrow().len(), CACHE_CAPACITY, "Cache should have CACHE_CAPACITY entries"); + + // Add one more item to trigger eviction 
(if implemented) + let extra_node = 1000 as Node; + + let extra_metadata = Metadata { + node: extra_node, + file_type: FileType::RegularFile, + link_count: 1, + size: 475, + times: Times { + accessed: 0, + modified: 0, + created: 0, + }, + first_dir_entry: None, + last_dir_entry: None, + chunk_type: Some(ChunkType::V2), + }; + + cache.update(extra_node, &extra_metadata); + + assert!(cache.meta.borrow().len() <= CACHE_CAPACITY, "Cache should not exceed CACHE_CAPACITY"); + assert!(cache.get(extra_node).is_some(), "Extra node should be in the cache after eviction"); + } +} + + + diff --git a/src/storage/ptr_cache.rs b/src/storage/ptr_cache.rs index 5e82f58..332a769 100644 --- a/src/storage/ptr_cache.rs +++ b/src/storage/ptr_cache.rs @@ -1,22 +1,35 @@ use std::collections::HashMap; use super::types::{FileChunkIndex, FileChunkPtr, Node}; +use ic_stable_structures::memory_manager::VirtualMemory; +use ic_stable_structures::BTreeMap; + +const CACHE_CAPACITY: usize = 1000; +// for short reads and writes it is better to cache some more chunks than the minimum required +const MIN_CACHE_CHUNKS: u32 = 10; + +#[derive(PartialEq, Debug, Clone, Copy)] +pub enum CachedChunkPtr { + // the chunk exists and we know its location + ChunkExists(FileChunkPtr), + // the chunk doesn't exist + ChunkMissing, +} -static CACHE_CAPACITY: usize = 1000; - +#[derive(Debug)] pub(crate) struct PtrCache { - pointers: HashMap<(Node, FileChunkIndex), FileChunkPtr>, + pointers: HashMap<(Node, FileChunkIndex), CachedChunkPtr>, } impl PtrCache { pub fn new() -> PtrCache { - let pointers: HashMap<(Node, FileChunkIndex), FileChunkPtr> = + let pointers: HashMap<(Node, FileChunkIndex), CachedChunkPtr> = HashMap::with_capacity(CACHE_CAPACITY); PtrCache { pointers } } // add new cache pointer - pub fn add(&mut self, cache_pairs: Vec<((Node, FileChunkIndex), FileChunkPtr)>) { + pub fn add(&mut self, cache_pairs: Vec<((Node, FileChunkIndex), CachedChunkPtr)>) { if self.pointers.len() + cache_pairs.len() > CACHE_CAPACITY { self.clear(); } @@ -26,12 +39,58 @@ impl PtrCache { } } + pub fn add_range( + &mut self, + node: Node, + from_index: FileChunkIndex, + to_index: FileChunkIndex, + v2_chunk_ptr: &BTreeMap<(Node, FileChunkIndex), FileChunkPtr, VirtualMemory>, + ) { + let to_index = to_index.max(from_index + MIN_CACHE_CHUNKS); + + let range = (node, from_index)..(node, to_index); + + let items = v2_chunk_ptr.range(range); + + let mut new_cache = Vec::with_capacity(to_index as usize - from_index as usize); + + let mut cur_index = from_index; + + let mut iterator_empty = true; + + for ((n, index), ptr) in items { + assert!(node == n); + + iterator_empty = false; + + while cur_index < index { + new_cache.push(((node, cur_index), CachedChunkPtr::ChunkMissing)); + cur_index += 1; + } + + assert!(cur_index == index); + + new_cache.push(((node, cur_index), CachedChunkPtr::ChunkExists(ptr))); + cur_index += 1; + } + + // if no chunks were found, fill the whole vector with missing chunks + if iterator_empty { + while cur_index < to_index { + new_cache.push(((node, cur_index), CachedChunkPtr::ChunkMissing)); + cur_index += 1; + } + } + + self.add(new_cache); + } + // clear cache completely pub fn clear(&mut self) { self.pointers.clear(); } - pub fn get(&self, key: (Node, FileChunkIndex)) -> std::option::Option { + pub fn get(&self, key: (Node, FileChunkIndex)) -> std::option::Option { self.pointers.get(&key).copied() } } @@ -44,7 +103,17 @@ mod tests { fn add_and_get() { let mut cache = PtrCache::new(); let key = (5 as Node, 7 as FileChunkIndex); - 
let value = 34 as FileChunkPtr; + let value = CachedChunkPtr::ChunkExists(34 as FileChunkPtr); + cache.add(vec![(key, value)]); + + assert_eq!(cache.get(key), Some(value)); + } + + #[test] + fn add_and_get_missing() { + let mut cache = PtrCache::new(); + let key = (5 as Node, 7 as FileChunkIndex); + let value = CachedChunkPtr::ChunkMissing; cache.add(vec![(key, value)]); assert_eq!(cache.get(key), Some(value)); @@ -54,7 +123,7 @@ mod tests { fn get_none_existing() { let mut cache = PtrCache::new(); let key = (5 as Node, 7 as FileChunkIndex); - let value = 34 as FileChunkPtr; + let value = CachedChunkPtr::ChunkExists(34 as FileChunkPtr); cache.add(vec![(key, value)]); assert_eq!(cache.get((5 as Node, 8 as FileChunkIndex)), None); @@ -64,7 +133,7 @@ mod tests { fn test_clear() { let mut cache = PtrCache::new(); let key = (5 as Node, 7 as FileChunkIndex); - let value = 34 as FileChunkPtr; + let value = CachedChunkPtr::ChunkExists(34 as FileChunkPtr); cache.add(vec![(key, value)]); cache.clear(); assert_eq!(cache.get(key), None); @@ -74,21 +143,28 @@ mod tests { fn check_clear_cache_happens() { let mut cache = PtrCache::new(); - for i in 0..CACHE_CAPACITY + 5 { - cache.add(vec![( - (5 as Node, i as FileChunkIndex), - i as u64 * 4096 as FileChunkPtr, - )]); - } - + // arrange let mut expected_insertions: Vec<_> = Vec::new(); - for i in (CACHE_CAPACITY)..(CACHE_CAPACITY + 5) { + for i in CACHE_CAPACITY..(CACHE_CAPACITY + 5) { expected_insertions.push(( (5 as Node, i as FileChunkIndex), - i as u64 * 4096 as FileChunkPtr, + CachedChunkPtr::ChunkExists(i as u64 * 4096 as FileChunkPtr), )); } + // act + for i in 0..CACHE_CAPACITY + 5 { + cache.add(vec![( + (5 as Node, i as FileChunkIndex), + CachedChunkPtr::ChunkExists(i as u64 * 4096 as FileChunkPtr), + )]); + } + + // assert assert_eq!(cache.pointers.len(), 5); + + for (k, v) in expected_insertions { + assert_eq!(v, cache.get(k).unwrap()); + } } } diff --git a/src/storage/stable.rs b/src/storage/stable.rs index cd893f6..753a6cb 100644 --- a/src/storage/stable.rs +++ b/src/storage/stable.rs @@ -6,6 +6,8 @@ use ic_stable_structures::{ BTreeMap, Cell, Memory, }; +use crate::storage::ptr_cache::CachedChunkPtr; + use crate::{ error::Error, runtime::{ @@ -16,15 +18,10 @@ use crate::{ }; use super::{ - allocator::ChunkPtrAllocator, - chunk_iterator::ChunkV2Iterator, - journal::CacheJournal, - ptr_cache::PtrCache, - types::{ + allocator::ChunkPtrAllocator, chunk_iterator::ChunkV2Iterator, journal::CacheJournal, metadata_cache::MetadataCache, ptr_cache::PtrCache, types::{ DirEntry, DirEntryIndex, FileChunk, FileChunkIndex, FileChunkPtr, FileSize, FileType, Header, Metadata, Node, Times, FILE_CHUNK_SIZE_V1, MAX_FILE_CHUNK_SIZE_V2, - }, - Storage, + }, Storage }; const ROOT_NODE: Node = 0; @@ -95,11 +92,11 @@ pub struct StableStorage { // chunk type when creating new files chunk_type: ChunkType, - // + // chunk pointer cache pub(crate) ptr_cache: PtrCache, - // only use it with normal files (not mounted) - last_metadata: (Node, Metadata), + // only use it with non-mounted files + meta_cache: MetadataCache, } impl StableStorage { @@ -207,7 +204,8 @@ impl StableStorage { // default chunk type is V2 chunk_type: ChunkType::V2, ptr_cache: PtrCache::new(), - last_metadata: (u64::MAX, Metadata::default()), + + meta_cache: MetadataCache::new(), }; let version = result.header.get().version; @@ -266,57 +264,87 @@ impl StableStorage { self.filechunk.insert((node, index), entry); } - // Insert or update a selected file chunk with the data provided in a buffer. 
- fn write_filechunk_v2( + fn write_chunks_v2( &mut self, node: Node, - index: FileChunkIndex, offset: FileSize, buf: &[u8], - ) { - let len = buf.len(); + ) -> Result<FileSize, Error> { + let mut remainder = buf.len() as FileSize; + let last_address = offset + remainder; + + let chunk_size = self.chunk_size(); + + let start_index = (offset / chunk_size as FileSize) as FileChunkIndex; + + let mut chunk_offset = offset - start_index as FileSize * chunk_size as FileSize; - assert!(len <= self.v2_allocator.chunk_size()); + let mut size_written: FileSize = 0; - let mut chunk_ptr = self.ptr_cache.get((node, index)); + let write_iter = ChunkV2Iterator::new( + node, + offset, + last_address, + self.chunk_size() as FileSize, + &mut self.ptr_cache, + &mut self.v2_chunk_ptr, + ); + + let write_iter: Vec<_> = write_iter.collect(); - if chunk_ptr.is_none() { - // prefill cache with some values, if they exist among the file chunks already - let mut new_cache: Vec<((Node, FileChunkIndex), FileChunkPtr)> = Vec::with_capacity(20); - for pair in self.v2_chunk_ptr.range((node, index)..(node, index + 20)) { - new_cache.push(pair); + for ((nd, index), chunk_ptr) in write_iter { + assert!(nd == node); + + if remainder == 0 { + break; } - self.ptr_cache.add(new_cache); - chunk_ptr = self.ptr_cache.get((node, index)); - } + let to_write = remainder + .min(chunk_size as FileSize - chunk_offset) + .min(buf.len() as FileSize - size_written); - if chunk_ptr.is_none() { - // chunk is still not found, means it does not exist, we will have to create it + let write_buf = + &buf[size_written as usize..(size_written as usize + to_write as usize)]; - let ptr = self.v2_allocator.allocate(); - // init with 0 - let remainder = self.chunk_size() as FileSize - (offset as FileSize + len as FileSize); + let chunk_ptr = if let CachedChunkPtr::ChunkExists(ptr) = chunk_ptr { + ptr + } else { + // insert new chunk + let ptr = self.v2_allocator.allocate(); - grow_memory(&self.v2_chunks, ptr + self.chunk_size() as FileSize); + grow_memory(&self.v2_chunks, ptr + chunk_size as FileSize); - self.v2_chunks.write( - ptr + offset + len as FileSize, - &ZEROES[0..remainder as usize], - ); + // fill new chunk with zeroes (apart from the area that will be overwritten) - self.v2_chunk_ptr.insert((node, index), ptr); - self.ptr_cache.add(vec![((node, index), ptr)]); + // fill before written content + self.v2_chunks.write(ptr, &ZEROES[0..chunk_offset as usize]); - chunk_ptr = Some(ptr); - } + // fill after written content + self.v2_chunks.write( + ptr + chunk_offset + to_write as FileSize, + &ZEROES[0..(chunk_size - chunk_offset as usize - to_write as usize)], + ); + + // register new chunk pointer + self.v2_chunk_ptr.insert((node, index), ptr); - // the pointer has to be defined by now - let chunk_ptr = chunk_ptr.unwrap(); + // cache the new chunk pointer + self.ptr_cache + .add(vec![((node, index), CachedChunkPtr::ChunkExists(ptr))]); - grow_memory(&self.v2_chunks, chunk_ptr + offset + buf.len() as FileSize); + ptr + }; - self.v2_chunks.write(chunk_ptr + offset, buf); + // growing here should not be required as the grow is called during chunk allocation above + // grow_memory(&self.v2_chunks, chunk_ptr + offset + buf.len() as FileSize); + self.v2_chunks.write(chunk_ptr + chunk_offset, write_buf); + + chunk_offset = 0; + size_written += to_write; + remainder -= to_write; + } + + Ok(size_written) } fn read_chunks_v1( @@ -416,12 +444,12 @@ node, offset, file_size, - self.chunk_size() as FileSize, + chunk_size as FileSize, &mut self.ptr_cache, &mut self.v2_chunk_ptr, ); - for ((nd, _idx), 
chunk_ptr) in read_iter { + for ((nd, _idx), cached_chunk) in read_iter { assert!(nd == node); // finished reading, buffer full @@ -437,7 +465,7 @@ impl StableStorage { let read_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize]; - if let Some(cptr) = chunk_ptr { + if let CachedChunkPtr::ChunkExists(cptr) = cached_chunk { self.v2_chunks.read(cptr + chunk_offset, read_buf); } else { // fill read buffer with 0 @@ -510,8 +538,10 @@ impl Storage for StableStorage { // Get the metadata associated with the node. fn get_metadata(&self, node: Node) -> Result { + if self.is_mounted(node) { if self.cache_journal.read_mounted_meta_node() == Some(node) { + let mut meta = Metadata::default(); self.cache_journal.read_mounted_meta(&mut meta); @@ -520,11 +550,20 @@ impl Storage for StableStorage { self.mounted_meta.get(&node).ok_or(Error::NotFound) } else { - if self.last_metadata.0 == node { - return Ok(self.last_metadata.1.clone()); + + let meta = self.meta_cache.get(node); + + if let Some(meta) = meta { + return Ok(meta); + } + + let meta = self.metadata.get(&node).ok_or(Error::NotFound); + + if let Ok(ref meta) = meta { + self.meta_cache.update(node, meta); } - self.metadata.get(&node).ok_or(Error::NotFound) + meta } } @@ -540,7 +579,7 @@ impl Storage for StableStorage { self.cache_journal.write_mounted_meta(&node, &metadata) } else { - self.last_metadata = (node, metadata.clone()); + self.meta_cache.update(node, &metadata); self.metadata.insert(node, metadata); } } @@ -602,39 +641,29 @@ impl Storage for StableStorage { buf.len() as FileSize } else { let end = offset + buf.len() as FileSize; - let mut written_size = 0; let use_v2 = self.use_v2(&metadata, node); if use_v2 { - let chunk_infos = get_chunk_infos(offset, end, self.chunk_size()); - - for chunk in chunk_infos.into_iter() { - self.write_filechunk_v2( - node, - chunk.index, - chunk.offset, - &buf[written_size..written_size + chunk.len as usize], - ); - - written_size += chunk.len as usize; - } + self.write_chunks_v2(node, offset, buf)? } else { let chunk_infos = get_chunk_infos(offset, end, FILE_CHUNK_SIZE_V1); + let mut written = 0usize; + for chunk in chunk_infos.into_iter() { self.write_filechunk_v1( node, chunk.index, chunk.offset, - &buf[written_size..written_size + chunk.len as usize], + &buf[written..(written + chunk.len as usize)], ); - written_size += chunk.len as usize; + written += chunk.len as usize; } - } - written_size as FileSize + written as FileSize + } }; let end = offset + buf.len() as FileSize; @@ -693,10 +722,7 @@ impl Storage for StableStorage { self.cache_journal.reset_mounted_meta() } - if self.last_metadata.0 == node { - // reset cached metadata - self.last_metadata.0 = u64::MAX; - } + self.meta_cache.clear(); Ok(()) } diff --git a/tests/a b/tests/a new file mode 100755 index 0000000..a8c2767 --- /dev/null +++ b/tests/a @@ -0,0 +1,7 @@ +#!/bin/bash + +set -e + +./b.sh + +ic-repl fsperf.repl \ No newline at end of file diff --git a/tests/b.sh b/tests/b.sh new file mode 100755 index 0000000..f234b43 --- /dev/null +++ b/tests/b.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +cd fs_benchmarks + +. build.sh + +cd .. 
+ diff --git a/tests/canister_initial/Cargo.lock b/tests/canister_initial/Cargo.lock index 3306b3e..813784f 100644 --- a/tests/canister_initial/Cargo.lock +++ b/tests/canister_initial/Cargo.lock @@ -566,9 +566,9 @@ checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "serde" -version = "1.0.204" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -584,9 +584,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -647,7 +647,7 @@ dependencies = [ [[package]] name = "stable-fs" -version = "0.6.3" +version = "0.6.4" dependencies = [ "bitflags", "ciborium", diff --git a/tests/canister_upgraded/Cargo.lock b/tests/canister_upgraded/Cargo.lock index 20f651f..bb14adf 100644 --- a/tests/canister_upgraded/Cargo.lock +++ b/tests/canister_upgraded/Cargo.lock @@ -566,9 +566,9 @@ checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "serde" -version = "1.0.202" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -584,9 +584,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.202" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -647,7 +647,7 @@ dependencies = [ [[package]] name = "stable-fs" -version = "0.6.3" +version = "0.6.4" dependencies = [ "bitflags", "ciborium", diff --git a/tests/fsperf.repl b/tests/fsperf.repl new file mode 100755 index 0000000..27232e4 --- /dev/null +++ b/tests/fsperf.repl @@ -0,0 +1,84 @@ +#!ic-repl + +function install(wasm, args, cycle) { + let id = call ic.provisional_create_canister_with_cycles(record { settings = null; amount = cycle }); + let S = id.canister_id; + let _ = call ic.install_code( + record { + arg = args; + wasm_module = gzip(wasm); + mode = variant { install }; + canister_id = S; + } + ); + S +}; + +function upgrade(id, wasm, args) { + call ic.install_code( + record { + arg = args; + wasm_module = gzip(wasm); + mode = variant { upgrade }; + canister_id = id; + } + ); +}; + +function uninstall(id) { + call ic.stop_canister(record { canister_id = id }); + call ic.delete_canister(record { canister_id = id }); +}; + +function get_memory(cid) { + let _ = call ic.canister_status(record { canister_id = cid }); + _.memory_size +}; + +let file = "README.md"; + +let rs_config = record { start_page = 1; page_limit = 1128}; + +let wasm_name = "fs_benchmarks/target/wasm32-unknown-unknown/release/fs_benchmarks_backend_small.wasm"; + +function perf_write_10mb() { + let cid = install(wasm_profiling(wasm_name, rs_config), encode (), null); + + call cid.__toggle_tracing(); + call 
cid.append_buffer( "abc1234567", (1_00_00: nat64) ); + call cid.store_buffer( "test.txt"); + call cid.__toggle_tracing(); + + call cid.store_buffer( "test.txt"); + + flamegraph(cid, "perf_write_10mb", "svg/perf_write_10mb.svg"); + uninstall(cid) +}; + +function perf_write_10mb_segments() { + let cid = install(wasm_profiling(wasm_name, rs_config), encode (), null); + + call cid.__toggle_tracing(); + call cid.append_buffer( "abc1234567", (1_00_00: nat64) ); + call cid.store_buffer( "test.txt"); +// call cid.store_buffer( "test2.txt"); + call cid.__toggle_tracing(); + + call cid.store_buffer_in_1000b_segments( "test.txt"); + + flamegraph(cid, "perf_write_10mb_segments", "svg/perf_write_10mb_segments.svg"); + uninstall(cid) +}; + + +/// files +perf_write_10mb(); +perf_write_10mb_segments(); + + +//call cid.__toggle_tracing(); +//call cid.list_files("files"); +//call cid.__toggle_tracing(); + + + From 7f881db912e7f36bb81d214b9cd4210c4003bb54 Mon Sep 17 00:00:00 2001 From: wasm-forge <122647775+wasm-forge@users.noreply.github.com> Date: Sun, 13 Oct 2024 14:04:40 +0200 Subject: [PATCH 2/3] clean-up --- src/runtime/structure_helpers.rs | 4 +- src/storage.rs | 2 +- src/storage/metadata_cache.rs | 72 ++++++++++++++++++++++---------- src/storage/stable.rs | 15 ++++--- 4 files changed, 60 insertions(+), 33 deletions(-) diff --git a/src/runtime/structure_helpers.rs b/src/runtime/structure_helpers.rs index 8fe151f..9af16e8 100644 --- a/src/runtime/structure_helpers.rs +++ b/src/runtime/structure_helpers.rs @@ -84,9 +84,7 @@ pub fn find_node( Ok(result.node) } - Err(e) => { - Err(e) - } + Err(e) => Err(e), } } diff --git a/src/storage.rs b/src/storage.rs index 9ed2f0f..dd62fa8 100644 --- a/src/storage.rs +++ b/src/storage.rs @@ -11,8 +11,8 @@ mod allocator; mod chunk_iterator; pub mod dummy; mod journal; -mod ptr_cache; mod metadata_cache; +mod ptr_cache; pub mod stable; pub mod transient; pub mod types; diff --git a/src/storage/metadata_cache.rs b/src/storage/metadata_cache.rs index 770f060..a1c2e4f 100644 --- a/src/storage/metadata_cache.rs +++ b/src/storage/metadata_cache.rs @@ -13,14 +13,14 @@ pub(crate) struct MetadataCache { impl MetadataCache { pub fn new() -> MetadataCache { - let meta: Rc>> = Rc::new(RefCell::new(HashMap::with_capacity(CACHE_CAPACITY))); - + let meta: Rc>> = + Rc::new(RefCell::new(HashMap::with_capacity(CACHE_CAPACITY))); + MetadataCache { meta } } // add new cache meta pub fn update(&self, node: Node, new_meta: &Metadata) { - let mut meta = (*self.meta).borrow_mut(); if meta.len() + 1 > CACHE_CAPACITY { @@ -46,14 +46,21 @@ impl MetadataCache { #[cfg(test)] mod tests { - use crate::{fs::ChunkType, storage::types::{FileType, Times}}; + use crate::{ + fs::ChunkType, + storage::types::{FileType, Times}, + }; use super::*; #[test] fn cache_initialization() { let cache = MetadataCache::new(); - assert_eq!(cache.meta.borrow().len(), 0, "Cache should be empty on initialization"); + assert_eq!( + cache.meta.borrow().len(), + 0, + "Cache should be empty on initialization" + ); } #[test] @@ -74,19 +81,23 @@ mod tests { last_dir_entry: None, chunk_type: Some(ChunkType::V2), }; - + cache.update(node, &metadata); - + // Check if the metadata can be retrieved correctly let retrieved = cache.get(node); - assert_eq!(retrieved, Some(metadata), "Retrieved metadata should match inserted metadata"); + assert_eq!( + retrieved, + Some(metadata), + "Retrieved metadata should match inserted metadata" + ); } #[test] fn cache_clear() { let cache = MetadataCache::new(); let node = 1 as Node; - + let 
metadata = Metadata { node, file_type: FileType::RegularFile, @@ -106,18 +117,26 @@ mod tests { cache.clear(); // Cache should be empty after clear - assert_eq!(cache.meta.borrow().len(), 0, "Cache should be empty after clearing"); - assert_eq!(cache.get(node), None, "Metadata should be None after cache is cleared"); + assert_eq!( + cache.meta.borrow().len(), + 0, + "Cache should be empty after clearing" + ); + assert_eq!( + cache.get(node), + None, + "Metadata should be None after cache is cleared" + ); } #[test] fn cache_eviction_when_capacity_exceeded() { let cache = MetadataCache::new(); - + // Fill the cache to its capacity for i in 0..CACHE_CAPACITY { let node = i as Node; - + let metadata = Metadata { node, file_type: FileType::RegularFile, @@ -135,12 +154,16 @@ mod tests { cache.update(node, &metadata); } - - assert_eq!(cache.meta.borrow().len(), CACHE_CAPACITY, "Cache should have CACHE_CAPACITY entries"); - + + assert_eq!( + cache.meta.borrow().len(), + CACHE_CAPACITY, + "Cache should have CACHE_CAPACITY entries" + ); + // Add one more item to trigger eviction (if implemented) let extra_node = 1000 as Node; - + let extra_metadata = Metadata { node: extra_node, file_type: FileType::RegularFile, @@ -157,11 +180,14 @@ mod tests { }; cache.update(extra_node, &extra_metadata); - - assert!(cache.meta.borrow().len() <= CACHE_CAPACITY, "Cache should not exceed CACHE_CAPACITY"); - assert!(cache.get(extra_node).is_some(), "Extra node should be in the cache after eviction"); + + assert!( + cache.meta.borrow().len() <= CACHE_CAPACITY, + "Cache should not exceed CACHE_CAPACITY" + ); + assert!( + cache.get(extra_node).is_some(), + "Extra node should be in the cache after eviction" + ); } } - - - diff --git a/src/storage/stable.rs b/src/storage/stable.rs index 753a6cb..2651fbd 100644 --- a/src/storage/stable.rs +++ b/src/storage/stable.rs @@ -18,10 +18,16 @@ use crate::{ }; use super::{ - allocator::ChunkPtrAllocator, chunk_iterator::ChunkV2Iterator, journal::CacheJournal, metadata_cache::MetadataCache, ptr_cache::PtrCache, types::{ + allocator::ChunkPtrAllocator, + chunk_iterator::ChunkV2Iterator, + journal::CacheJournal, + metadata_cache::MetadataCache, + ptr_cache::PtrCache, + types::{ DirEntry, DirEntryIndex, FileChunk, FileChunkIndex, FileChunkPtr, FileSize, FileType, Header, Metadata, Node, Times, FILE_CHUNK_SIZE_V1, MAX_FILE_CHUNK_SIZE_V2, - }, Storage + }, + Storage, }; const ROOT_NODE: Node = 0; @@ -538,10 +544,8 @@ impl Storage for StableStorage { // Get the metadata associated with the node. 
fn get_metadata(&self, node: Node) -> Result { - if self.is_mounted(node) { if self.cache_journal.read_mounted_meta_node() == Some(node) { - let mut meta = Metadata::default(); self.cache_journal.read_mounted_meta(&mut meta); @@ -550,7 +554,6 @@ impl Storage for StableStorage { self.mounted_meta.get(&node).ok_or(Error::NotFound) } else { - let meta = self.meta_cache.get(node); if let Some(meta) = meta { @@ -558,7 +561,7 @@ impl Storage for StableStorage { } let meta = self.metadata.get(&node).ok_or(Error::NotFound); - + if let Ok(ref meta) = meta { self.meta_cache.update(node, meta); } From ee0ccda5b280eeb5ea6fdf1cf11c9e6d5f87e432 Mon Sep 17 00:00:00 2001 From: wasm-forge <122647775+wasm-forge@users.noreply.github.com> Date: Sun, 13 Oct 2024 14:11:43 +0200 Subject: [PATCH 3/3] clippy --- src/filename_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/filename_cache.rs b/src/filename_cache.rs index 845fab5..ad6c1e4 100644 --- a/src/filename_cache.rs +++ b/src/filename_cache.rs @@ -61,7 +61,7 @@ mod tests { let filename = "test_file".to_string(); let node = 35 as Node; - cache.add((fd, filename.clone()), node.clone()); + cache.add((fd, filename.clone()), node); let retrieved_node = cache.get(&(fd, filename)); assert_eq!(retrieved_node, Some(node));