From 37244076e30085c861cf662f7b87c0c77b390678 Mon Sep 17 00:00:00 2001 From: Amin Yahyaabadi Date: Tue, 4 Jul 2023 01:39:56 -0700 Subject: [PATCH 1/9] feat: implement clone for all the containers --- src/index_map.rs | 9 +++++++++ src/index_set.rs | 9 +++++++++ src/map.rs | 18 ++++++++++++++++++ src/sync.rs | 36 ++++++++++++++++++++++++++++++++++++ src/vec.rs | 9 +++++++++ 5 files changed, 81 insertions(+) diff --git a/src/index_map.rs b/src/index_map.rs index fb70b7f..50d021f 100644 --- a/src/index_map.rs +++ b/src/index_map.rs @@ -235,3 +235,12 @@ impl Default for FrozenIndexMap { } } } + +impl Clone for FrozenIndexMap { + fn clone(&self) -> Self { + Self { + map: unsafe { self.map.get().as_ref().unwrap() }.clone().into(), + in_use: self.in_use.clone(), + } + } +} diff --git a/src/index_set.rs b/src/index_set.rs index 9e99632..28819d0 100644 --- a/src/index_set.rs +++ b/src/index_set.rs @@ -180,3 +180,12 @@ impl Default for FrozenIndexSet { Self::from(IndexSet::default()) } } + +impl Clone for FrozenIndexSet { + fn clone(&self) -> Self { + Self { + set: unsafe { self.set.get().as_ref().unwrap() }.clone().into(), + in_use: self.in_use.clone(), + } + } +} diff --git a/src/map.rs b/src/map.rs index 147a2ec..e8cf7c1 100644 --- a/src/map.rs +++ b/src/map.rs @@ -270,6 +270,15 @@ impl Default for FrozenMap { } } +impl Clone for FrozenMap { + fn clone(&self) -> Self { + Self { + map: unsafe { self.map.get().as_ref().unwrap() }.clone().into(), + in_use: self.in_use.clone(), + } + } +} + /// Append-only version of `std::collections::BTreeMap` where /// insertion does not require mutable access pub struct FrozenBTreeMap { @@ -495,3 +504,12 @@ impl Default for FrozenBTreeMap { } } } + +impl Clone for FrozenBTreeMap { + fn clone(&self) -> Self { + Self { + map: unsafe { self.map.get().as_ref().unwrap() }.clone().into(), + in_use: self.in_use.clone(), + } + } +} diff --git a/src/sync.rs b/src/sync.rs index a551b9a..c2e0a69 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -381,6 +381,14 @@ impl std::convert::AsMut> for FrozenMap { } } +impl Clone for FrozenMap { + fn clone(&self) -> Self { + Self { + map: self.map.read().unwrap().clone().into(), + } + } +} + /// Append-only threadsafe version of `std::vec::Vec` where /// insertion does not require mutable access pub struct FrozenVec { @@ -488,6 +496,14 @@ impl Default for FrozenVec { } } +impl Clone for FrozenVec { + fn clone(&self) -> Self { + Self { + vec: self.vec.read().unwrap().clone().into(), + } + } +} + // The context for these functions is that we want to have a // series of exponentially increasing buffer sizes. 
We want // to maximize the total size of the buffers (since this @@ -713,6 +729,20 @@ fn test_non_lockfree_unchecked() { LockFreeFrozenVec::<()>::new(); } +impl Clone for LockFreeFrozenVec { + fn clone(&self) -> Self { + let cap = self.cap.load(Ordering::Acquire); + let len = self.len.load(Ordering::Acquire); + + let new_vec = Self::with_capacity(cap); + for i in 0..len { + new_vec.push(self.get(i).unwrap()); + } + + new_vec + } +} + #[test] fn test_non_lockfree() { #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -933,3 +963,9 @@ impl Default for FrozenBTreeMap { Self::new() } } + +impl Clone for FrozenBTreeMap { + fn clone(&self) -> Self { + Self(self.0.read().unwrap().clone().into()) + } +} diff --git a/src/vec.rs b/src/vec.rs index 33b6e6a..891a89c 100644 --- a/src/vec.rs +++ b/src/vec.rs @@ -216,6 +216,15 @@ impl Default for FrozenVec { } } +impl Clone for FrozenVec { + fn clone(&self) -> Self { + Self { + vec: unsafe { self.vec.get().as_ref().unwrap() }.clone().into(), + } + } +} + + impl From> for FrozenVec { fn from(vec: Vec) -> Self { Self { From a4f7b0a9a970e52ad003fa6633b3ee61e98523bf Mon Sep 17 00:00:00 2001 From: Amin Yahyaabadi Date: Wed, 9 Aug 2023 20:02:22 -0700 Subject: [PATCH 2/9] fix: set in_use to true while cloning --- src/index_map.rs | 10 +++++++--- src/index_set.rs | 10 +++++++--- src/map.rs | 20 ++++++++++++++------ src/vec.rs | 1 - 4 files changed, 28 insertions(+), 13 deletions(-) diff --git a/src/index_map.rs b/src/index_map.rs index 50d021f..b96179f 100644 --- a/src/index_map.rs +++ b/src/index_map.rs @@ -238,9 +238,13 @@ impl Default for FrozenIndexMap { impl Clone for FrozenIndexMap { fn clone(&self) -> Self { - Self { + assert!(!self.in_use.get()); + self.in_use.set(true); + let self_clone = Self { map: unsafe { self.map.get().as_ref().unwrap() }.clone().into(), - in_use: self.in_use.clone(), - } + in_use: Cell::from(false), + }; + self.in_use.set(false); + return self_clone; } } diff --git a/src/index_set.rs b/src/index_set.rs index 28819d0..59588ad 100644 --- a/src/index_set.rs +++ b/src/index_set.rs @@ -183,9 +183,13 @@ impl Default for FrozenIndexSet { impl Clone for FrozenIndexSet { fn clone(&self) -> Self { - Self { + assert!(!self.in_use.get()); + self.in_use.set(true); + let self_clone = Self { set: unsafe { self.set.get().as_ref().unwrap() }.clone().into(), - in_use: self.in_use.clone(), - } + in_use: Cell::from(false), + }; + self.in_use.set(false); + return self_clone; } } diff --git a/src/map.rs b/src/map.rs index e8cf7c1..1c94868 100644 --- a/src/map.rs +++ b/src/map.rs @@ -272,10 +272,14 @@ impl Default for FrozenMap { impl Clone for FrozenMap { fn clone(&self) -> Self { - Self { + assert!(!self.in_use.get()); + self.in_use.set(true); + let self_clone = Self { map: unsafe { self.map.get().as_ref().unwrap() }.clone().into(), - in_use: self.in_use.clone(), - } + in_use: Cell::from(false), + }; + self.in_use.set(false); + return self_clone; } } @@ -507,9 +511,13 @@ impl Default for FrozenBTreeMap { impl Clone for FrozenBTreeMap { fn clone(&self) -> Self { - Self { + assert!(!self.in_use.get()); + self.in_use.set(true); + let self_clone = Self { map: unsafe { self.map.get().as_ref().unwrap() }.clone().into(), - in_use: self.in_use.clone(), - } + in_use: Cell::from(false), + }; + self.in_use.set(false); + return self_clone; } } diff --git a/src/vec.rs b/src/vec.rs index 891a89c..592e443 100644 --- a/src/vec.rs +++ b/src/vec.rs @@ -224,7 +224,6 @@ impl Clone for FrozenVec { } } - impl From> for FrozenVec { fn from(vec: Vec) -> Self { Self { From 
8a5603a2b819c8a9719db4b4ee6c54feef3d3dad Mon Sep 17 00:00:00 2001 From: Amin Yahyaabadi Date: Sat, 23 Sep 2023 01:37:07 -0700 Subject: [PATCH 3/9] fix: reimplement Clone for new LockFreeFrozenVec --- src/sync.rs | 43 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/src/sync.rs b/src/sync.rs index c2e0a69..73b6f06 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -731,15 +731,42 @@ fn test_non_lockfree_unchecked() { impl Clone for LockFreeFrozenVec { fn clone(&self) -> Self { - let cap = self.cap.load(Ordering::Acquire); let len = self.len.load(Ordering::Acquire); + // handle the empty case + if len == 0 { + return Self::default(); + } - let new_vec = Self::with_capacity(cap); - for i in 0..len { - new_vec.push(self.get(i).unwrap()); + // copy the data + let data = [Self::NULL; NUM_BUFFERS]; + // for each buffer, copy the data + for i in 0..NUM_BUFFERS { + // get the buffer size and index + let buffer_size = buffer_size(i); + let buffer_idx = buffer_index(buffer_size - 1); + // get the buffer pointer + let buffer_ptr = self.data[buffer_idx].load(Ordering::Acquire); + if buffer_ptr.is_null() { + // no data in this buffer + break; + } + // allocate a new buffer + let layout = Self::layout(buffer_size); + let new_buffer_ptr = unsafe { std::alloc::alloc(layout).cast::() }; + assert!(!new_buffer_ptr.is_null()); + // copy the data + unsafe { + std::ptr::copy_nonoverlapping(buffer_ptr, new_buffer_ptr, buffer_size); + } + // store the new buffer pointer + data[i].store(new_buffer_ptr, Ordering::Release); } - new_vec + return Self { + data, + len: AtomicUsize::new(len), + locked: AtomicBool::new(false), + }; } } @@ -774,6 +801,12 @@ fn test_non_lockfree() { } }); + // Test cloning + let vec2 = vec.clone(); + assert_eq!(vec2.get(0), Some(Moo(1))); + assert_eq!(vec2.get(1), Some(Moo(2))); + assert_eq!(vec2.get(2), Some(Moo(3))); + // Test dropping empty vecs LockFreeFrozenVec::<()>::new(); } From efea7c410df11070a7c145793793069f35ac25c7 Mon Sep 17 00:00:00 2001 From: Amin Yahyaabadi Date: Tue, 26 Sep 2023 21:38:53 -0700 Subject: [PATCH 4/9] feat: add for_each_buffer method for LockFreeFrozenVec --- src/sync.rs | 66 ++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 21 deletions(-) diff --git a/src/sync.rs b/src/sync.rs index 73b6f06..1405361 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -693,6 +693,40 @@ impl LockFreeFrozenVec { let local_index = index - prior_total_buffer_size(buffer_idx); unsafe { *buffer_ptr.add(local_index) } } + + /// Run a function on each buffer in the vector. + /// + /// ## Arguments + /// - `func`: a function that takes a pointer to the buffer, the buffer size, and the buffer index + /// + /// ## Safety + /// `func` is provided with the raw constant pointer to the buffer, + /// however, the pointer is expected to be valid as the null check is done before calling `func`. + /// To access the data in the buffer, make sure the buffer size (the second argument) is respected. 
+ /// + pub unsafe fn for_each_buffer(&self, func: impl Fn(*const T, usize, usize)) { + let len = self.len.load(Ordering::Acquire); + // handle the empty case + if len == 0 { + return; + } + + // for each buffer, run the function + for buffer_index in 0..NUM_BUFFERS { + // get the buffer size and index + let buffer_size = buffer_size(buffer_index); + + // get the buffer pointer + let buffer_ptr = self.data[buffer_index].load(Ordering::Acquire); + if buffer_ptr.is_null() { + // no data in this buffer, so we're done + break; + } + + // run the function + func(buffer_ptr, buffer_size, buffer_index); + } + } } #[test] @@ -737,30 +771,20 @@ impl Clone for LockFreeFrozenVec { return Self::default(); } - // copy the data let data = [Self::NULL; NUM_BUFFERS]; // for each buffer, copy the data - for i in 0..NUM_BUFFERS { - // get the buffer size and index - let buffer_size = buffer_size(i); - let buffer_idx = buffer_index(buffer_size - 1); - // get the buffer pointer - let buffer_ptr = self.data[buffer_idx].load(Ordering::Acquire); - if buffer_ptr.is_null() { - // no data in this buffer - break; - } - // allocate a new buffer - let layout = Self::layout(buffer_size); - let new_buffer_ptr = unsafe { std::alloc::alloc(layout).cast::() }; - assert!(!new_buffer_ptr.is_null()); - // copy the data - unsafe { + unsafe { + self.for_each_buffer(|buffer_ptr, buffer_size, buffer_index| { + // allocate a new buffer + let layout = Self::layout(buffer_size); + let new_buffer_ptr = std::alloc::alloc(layout).cast::(); + assert!(!new_buffer_ptr.is_null()); + // copy the data std::ptr::copy_nonoverlapping(buffer_ptr, new_buffer_ptr, buffer_size); - } - // store the new buffer pointer - data[i].store(new_buffer_ptr, Ordering::Release); - } + // store the new buffer pointer + data[buffer_index].store(new_buffer_ptr, Ordering::Release); + }) + }; return Self { data, From aa7ee7a317d154cc17f0ff05e3549f011376f6c1 Mon Sep 17 00:00:00 2001 From: Amin Yahyaabadi Date: Tue, 26 Sep 2023 21:41:44 -0700 Subject: [PATCH 5/9] fix: use get_mut instead of store for the new data --- src/sync.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/sync.rs b/src/sync.rs index 1405361..54dcd45 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -704,7 +704,7 @@ impl LockFreeFrozenVec { /// however, the pointer is expected to be valid as the null check is done before calling `func`. /// To access the data in the buffer, make sure the buffer size (the second argument) is respected. 
/// - pub unsafe fn for_each_buffer(&self, func: impl Fn(*const T, usize, usize)) { + pub unsafe fn for_each_buffer(&self, mut func: impl FnMut(*const T, usize, usize)) { let len = self.len.load(Ordering::Acquire); // handle the empty case if len == 0 { @@ -771,7 +771,7 @@ impl Clone for LockFreeFrozenVec { return Self::default(); } - let data = [Self::NULL; NUM_BUFFERS]; + let mut coppied_data = [Self::NULL; NUM_BUFFERS]; // for each buffer, copy the data unsafe { self.for_each_buffer(|buffer_ptr, buffer_size, buffer_index| { @@ -782,12 +782,12 @@ impl Clone for LockFreeFrozenVec { // copy the data std::ptr::copy_nonoverlapping(buffer_ptr, new_buffer_ptr, buffer_size); // store the new buffer pointer - data[buffer_index].store(new_buffer_ptr, Ordering::Release); + *coppied_data[buffer_index].get_mut() = new_buffer_ptr; }) }; return Self { - data, + data: coppied_data, len: AtomicUsize::new(len), locked: AtomicBool::new(false), }; From 0b0647541132b8c67e6ecb074262583875b9f7cc Mon Sep 17 00:00:00 2001 From: Amin Yahyaabadi Date: Tue, 26 Sep 2023 21:47:03 -0700 Subject: [PATCH 6/9] fix: remove extra length check in clone --- src/sync.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/sync.rs b/src/sync.rs index 54dcd45..3897edb 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -713,9 +713,6 @@ impl LockFreeFrozenVec { // for each buffer, run the function for buffer_index in 0..NUM_BUFFERS { - // get the buffer size and index - let buffer_size = buffer_size(buffer_index); - // get the buffer pointer let buffer_ptr = self.data[buffer_index].load(Ordering::Acquire); if buffer_ptr.is_null() { @@ -723,6 +720,9 @@ impl LockFreeFrozenVec { break; } + // get the buffer size and index + let buffer_size = buffer_size(buffer_index); + // run the function func(buffer_ptr, buffer_size, buffer_index); } @@ -765,12 +765,6 @@ fn test_non_lockfree_unchecked() { impl Clone for LockFreeFrozenVec { fn clone(&self) -> Self { - let len = self.len.load(Ordering::Acquire); - // handle the empty case - if len == 0 { - return Self::default(); - } - let mut coppied_data = [Self::NULL; NUM_BUFFERS]; // for each buffer, copy the data unsafe { @@ -788,7 +782,7 @@ impl Clone for LockFreeFrozenVec { return Self { data: coppied_data, - len: AtomicUsize::new(len), + len: AtomicUsize::new(self.len.load(Ordering::Relaxed)), locked: AtomicBool::new(false), }; } From b34ae88cfce765c0baa629758563fdd3434a767c Mon Sep 17 00:00:00 2001 From: Amin Yahyaabadi Date: Mon, 23 Oct 2023 12:24:41 -0700 Subject: [PATCH 7/9] fix: pass a safe buffer slice in for_each_buffer --- src/sync.rs | 81 +++++++++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 39 deletions(-) diff --git a/src/sync.rs b/src/sync.rs index 55463bf..433b288 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -17,12 +17,13 @@ use std::hash::Hash; use std::iter::{FromIterator, IntoIterator}; use std::ops::Index; -use std::sync::TryLockError; +use std::ptr::NonNull; use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicPtr; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::RwLock; +use std::sync::TryLockError; /// Append-only threadsafe version of `std::collections::HashMap` where /// insertion does not require mutable access @@ -33,9 +34,7 @@ pub struct FrozenMap { impl fmt::Debug for FrozenMap { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.map.try_read() { - Ok(guard) => { - guard.fmt(f) - }, + Ok(guard) => guard.fmt(f), 
Err(TryLockError::Poisoned(err)) => { f.debug_tuple("FrozenMap").field(&&**err.get_ref()).finish() } @@ -46,8 +45,10 @@ impl fmt::Debug for FrozenMap { f.write_str("") } } - f.debug_tuple("FrozenMap").field(&LockedPlaceholder).finish() - }, + f.debug_tuple("FrozenMap") + .field(&LockedPlaceholder) + .finish() + } } } } @@ -74,7 +75,6 @@ impl From> for FrozenVec { } } - impl FrozenMap { // these should never return &K or &V // these should never delete any entries @@ -440,9 +440,7 @@ pub struct FrozenVec { impl fmt::Debug for FrozenVec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.vec.try_read() { - Ok(guard) => { - guard.fmt(f) - }, + Ok(guard) => guard.fmt(f), Err(TryLockError::Poisoned(err)) => { f.debug_tuple("FrozenMap").field(&&**err.get_ref()).finish() } @@ -453,8 +451,10 @@ impl fmt::Debug for FrozenVec { f.write_str("") } } - f.debug_tuple("FrozenMap").field(&LockedPlaceholder).finish() - }, + f.debug_tuple("FrozenMap") + .field(&LockedPlaceholder) + .finish() + } } } } @@ -816,14 +816,9 @@ impl LockFreeFrozenVec { /// Run a function on each buffer in the vector. /// /// ## Arguments - /// - `func`: a function that takes a pointer to the buffer, the buffer size, and the buffer index - /// - /// ## Safety - /// `func` is provided with the raw constant pointer to the buffer, - /// however, the pointer is expected to be valid as the null check is done before calling `func`. - /// To access the data in the buffer, make sure the buffer size (the second argument) is respected. + /// - `func`: a function that takes a slice to the buffer and the buffer index /// - pub unsafe fn for_each_buffer(&self, mut func: impl FnMut(*const T, usize, usize)) { + fn for_each_buffer(&self, mut func: impl FnMut(&[T], usize)) { let len = self.len.load(Ordering::Acquire); // handle the empty case if len == 0 { @@ -833,17 +828,21 @@ impl LockFreeFrozenVec { // for each buffer, run the function for buffer_index in 0..NUM_BUFFERS { // get the buffer pointer - let buffer_ptr = self.data[buffer_index].load(Ordering::Acquire); - if buffer_ptr.is_null() { + if let Some(buffer_ptr) = NonNull::new(self.data[buffer_index].load(Ordering::Acquire)) + { + // get the buffer size and index + let buffer_size = buffer_size(buffer_index); + + // create a slice from the buffer pointer and size + let buffer_slice = + unsafe { std::slice::from_raw_parts(buffer_ptr.as_ptr(), buffer_size) }; + + // run the function + func(buffer_slice, buffer_index); + } else { // no data in this buffer, so we're done break; } - - // get the buffer size and index - let buffer_size = buffer_size(buffer_index); - - // run the function - func(buffer_ptr, buffer_size, buffer_index); } } } @@ -886,18 +885,22 @@ impl Clone for LockFreeFrozenVec { fn clone(&self) -> Self { let mut coppied_data = [Self::NULL; NUM_BUFFERS]; // for each buffer, copy the data - unsafe { - self.for_each_buffer(|buffer_ptr, buffer_size, buffer_index| { - // allocate a new buffer - let layout = Self::layout(buffer_size); - let new_buffer_ptr = std::alloc::alloc(layout).cast::(); - assert!(!new_buffer_ptr.is_null()); - // copy the data - std::ptr::copy_nonoverlapping(buffer_ptr, new_buffer_ptr, buffer_size); - // store the new buffer pointer - *coppied_data[buffer_index].get_mut() = new_buffer_ptr; - }) - }; + self.for_each_buffer(|buffer_slice, buffer_index| { + // allocate a new buffer + let layout = Self::layout(buffer_slice.len()); + let new_buffer_ptr = unsafe { std::alloc::alloc(layout).cast::() }; + assert!(!new_buffer_ptr.is_null()); + // copy 
the data to the new buffer + unsafe { + std::ptr::copy_nonoverlapping( + buffer_slice.as_ptr(), + new_buffer_ptr, + buffer_slice.len(), + ); + }; + // store the new buffer pointer + *coppied_data[buffer_index].get_mut() = new_buffer_ptr; + }); return Self { data: coppied_data, From 561d57b2353ae04cd2077d49d7579fa2de139d40 Mon Sep 17 00:00:00 2001 From: Amin Yahyaabadi Date: Mon, 23 Oct 2023 12:27:37 -0700 Subject: [PATCH 8/9] test: test cloning a large vector --- src/sync.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/sync.rs b/src/sync.rs index 433b288..28679f6 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -942,10 +942,23 @@ fn test_non_lockfree() { }); // Test cloning - let vec2 = vec.clone(); - assert_eq!(vec2.get(0), Some(Moo(1))); - assert_eq!(vec2.get(1), Some(Moo(2))); - assert_eq!(vec2.get(2), Some(Moo(3))); + { + let vec2 = vec.clone(); + assert_eq!(vec2.get(0), Some(Moo(1))); + assert_eq!(vec2.get(1), Some(Moo(2))); + assert_eq!(vec2.get(2), Some(Moo(3))); + } + // Test cloning a large vector + { + let large_vec = LockFreeFrozenVec::new(); + for i in 0..1000 { + large_vec.push(Moo(i)); + } + let large_vec_2 = large_vec.clone(); + for i in 0..1000 { + assert_eq!(large_vec_2.get(i), Some(Moo(i as i32))); + } + } // Test dropping empty vecs LockFreeFrozenVec::<()>::new(); From 555dff3f59344c6587f1eb0a6fc3bcd59ce494cb Mon Sep 17 00:00:00 2001 From: Amin Yahyaabadi Date: Mon, 23 Oct 2023 12:28:27 -0700 Subject: [PATCH 9/9] fix: remove length check for the empty vector --- src/sync.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/sync.rs b/src/sync.rs index 28679f6..cf236ab 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -819,12 +819,6 @@ impl LockFreeFrozenVec { /// - `func`: a function that takes a slice to the buffer and the buffer index /// fn for_each_buffer(&self, mut func: impl FnMut(&[T], usize)) { - let len = self.len.load(Ordering::Acquire); - // handle the empty case - if len == 0 { - return; - } - // for each buffer, run the function for buffer_index in 0..NUM_BUFFERS { // get the buffer pointer @@ -959,6 +953,12 @@ fn test_non_lockfree() { assert_eq!(large_vec_2.get(i), Some(Moo(i as i32))); } } + // Test cloning an empty vector + { + let empty_vec = LockFreeFrozenVec::<()>::new(); + let empty_vec_2 = empty_vec.clone(); + assert_eq!(empty_vec_2.get(0), None); + } // Test dropping empty vecs LockFreeFrozenVec::<()>::new();
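
For readers of this series, here is a minimal usage sketch of the Clone support added above. It assumes these are the frozen collections of the `elsa` crate; the import paths and the concrete element types below are assumptions inferred from the file layout in the diffs, not taken from the patches themselves.

    // Illustrative sketch only (assumed paths: `elsa::FrozenMap` for src/map.rs,
    // `elsa::sync::LockFreeFrozenVec` for src/sync.rs).
    use elsa::sync::LockFreeFrozenVec;
    use elsa::FrozenMap;

    fn main() {
        // FrozenMap: clone copies the underlying HashMap; the original stays
        // usable for further append-only inserts.
        let map: FrozenMap<String, Box<u32>> = FrozenMap::new();
        map.insert("a".to_string(), Box::new(1));
        let map2 = map.clone();
        assert_eq!(map2.get("a"), Some(&1));

        // LockFreeFrozenVec: clone allocates fresh buffers and copies each one,
        // so the clone owns independent storage.
        let vec = LockFreeFrozenVec::new();
        vec.push(10u32);
        vec.push(20u32);
        let vec2 = vec.clone();
        assert_eq!(vec2.get(1), Some(20));
        vec2.push(30u32);
        assert_eq!(vec.get(2), None); // pushing to the clone does not affect the original
    }

Note that the single-threaded containers assert `!in_use` before cloning (patch 2), so a re-entrant clone from inside a closure that the same container is currently running panics by design rather than reading the map mid-mutation.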