diff --git a/src/lib.rs b/src/lib.rs
index 0ca244c..bbb4fb2 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -408,6 +408,32 @@ impl HeaderVec {
     fn end_ptr_atomic_mut(&self) -> *mut T {
         unsafe { self.ptr.add(Self::offset()).add(self.len_atomic_acquire()) }
     }
+
+    /// Atomically adds an item to the end of the list without reallocation.
+    ///
+    /// # Errors
+    ///
+    /// If the vector is full, the item is returned.
+    ///
+    /// # Safety
+    ///
+    /// There must be only one thread calling this method at any time. Synchronization has to
+    /// be provided by the user.
+    pub unsafe fn push_atomic(&self, item: T) -> Result<(), T> {
+        // relaxed is good enough here because this should be the only thread calling this method.
+        let len = self.len_atomic_relaxed();
+        if len < self.capacity() {
+            unsafe {
+                core::ptr::write(self.end_ptr_atomic_mut(), item);
+            };
+            let len_again = self.len_atomic_add_release(1);
+            // in debug builds we check for races; the chances of catching one are still pretty minimal
+            debug_assert_eq!(len_again, len, "len was updated by another thread");
+            Ok(())
+        } else {
+            Err(item)
+        }
+    }
 }
 
 impl Drop for HeaderVec {
diff --git a/tests/atomic_append.rs b/tests/atomic_append.rs
new file mode 100644
index 0000000..a1112db
--- /dev/null
+++ b/tests/atomic_append.rs
@@ -0,0 +1,16 @@
+#![cfg(feature = "atomic_append")]
+extern crate std;
+
+use header_vec::*;
+
+#[test]
+fn test_atomic_append() {
+    let mut hv = HeaderVec::with_capacity(10, ());
+
+    hv.push(1);
+    unsafe { hv.push_atomic(2).unwrap() };
+    hv.push(3);
+
+    assert_eq!(hv.len(), 3);
+    assert_eq!(hv.as_slice(), [1, 2, 3]);
+}
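
Not part of the patch: a minimal usage sketch, assuming the atomic_append feature is enabled, that illustrates the single-writer contract from the Safety section and the Err-on-full behavior from the Errors section. It relies only on the public with_capacity, len, and capacity accessors already used in the test above.

    use header_vec::HeaderVec;

    fn main() {
        // `()` is the header value; the element type is inferred from the pushes below.
        let hv = HeaderVec::with_capacity(2, ());
        // Safety: this is the only thread appending, as the push_atomic contract requires.
        for i in 0..10 {
            match unsafe { hv.push_atomic(i) } {
                Ok(()) => {}
                // the vector was full: push_atomic never reallocates, it hands the item back
                Err(rejected) => {
                    println!("no room for {rejected}, stopping at len {}", hv.len());
                    break;
                }
            }
        }
        assert!(hv.len() <= hv.capacity());
    }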