
Commit 7f8b83a

Merge branch 'feature-spare-elements' into feature-std-compatibility

2 parents: 5dd7b59 + e68a751

1 file changed: src/lib.rs (46 additions & 37 deletions)
@@ -90,6 +90,7 @@ impl<H, T> HeaderVec<H, T> {
 
     pub fn with_capacity(capacity: usize, head: H) -> Self {
         const { assert!(mem::size_of::<T>() > 0, "HeaderVec does not support ZST's") };
+
         // Allocate the initial memory, which is uninitialized.
         let layout = Self::layout(capacity);
         let ptr = unsafe { alloc::alloc::alloc(layout) } as *mut AlignedHeader<H, T>;
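A side note on the unchanged context above: the inline-const `assert!` rejects zero-sized element types at compile time, when the function is monomorphized, rather than panicking at runtime. A hypothetical illustration, assuming the crate is published as `header-vec`:

```rust
use header_vec::HeaderVec;

fn main() {
    // Compiles: `u8` is not a ZST.
    let _ok: HeaderVec<(), u8> = HeaderVec::with_capacity(4, ());

    // Fails to compile if uncommented: the `const { assert!(...) }` in
    // `with_capacity` is evaluated when the function is instantiated
    // with `T = ()`, a zero-sized type.
    // let _zst: HeaderVec<(), ()> = HeaderVec::with_capacity(4, ());
}
```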
@@ -693,8 +694,52 @@ impl<H, T: Clone> HeaderVec<H, T> {
 
 #[cfg(feature = "atomic_append")]
 /// The atomic append API is only enabled when the `atomic_append` feature flag is set (which
-/// is the default).
+/// is the default). The [`push_atomic()`] and [`extend_from_slice_atomic()`] methods then
+/// become available, and some internals use atomic operations.
+///
+/// This API implements interior-mutable appending to a shared `HeaderVec`. To other threads
+/// the appended elements are either not seen at all or all seen at once. Without additional
+/// synchronization these appends are racy but memory safe. The intention behind this API is
+/// to provide facilities for building other container abstractions that benefit from the
+/// shared, non-blocking nature while being unaffected by the racy semantics, or that provide
+/// synchronization on their own (e.g. reference-counted data, interners, streaming parsers).
+/// Since the `HeaderVec` is a shared object and we only have a `&self`, it cannot be
+/// reallocated and moved; therefore appending can only be done within the reserved capacity.
+///
+/// # Safety
+///
+/// Only a single thread may call [`push_atomic()`] or [`extend_from_slice_atomic()`] on the
+/// `HeaderVec` at a time. Enforcing this restriction is left to the caller: it can be done
+/// with a mutex or a guard object, by staying single-threaded, or by otherwise ensuring
+/// that only one thread uses the atomic append API.
 impl<H, T> HeaderVec<H, T> {
+    /// Atomically adds an item to the end of the list without reallocation.
+    ///
+    /// # Errors
+    ///
+    /// If the vector is full, the item is returned.
+    ///
+    /// # Safety
+    ///
+    /// There must be only one thread calling this method at any time. Synchronization has to
+    /// be provided by the user.
+    pub unsafe fn push_atomic(&self, item: T) -> Result<(), T> {
+        // Relaxed is good enough here because this should be the only thread calling this method.
+        let len = self.len_atomic_relaxed();
+        if len < self.capacity() {
+            unsafe {
+                core::ptr::write(self.end_ptr_atomic_mut(), item);
+            }
+            let len_again = self.len_atomic_add_release(1);
+            // In debug builds we check for races; the chance to catch one is still pretty minimal.
+            debug_assert_eq!(len_again, len, "len was updated by another thread");
+            Ok(())
+        } else {
+            Err(item)
+        }
+    }
+
     /// Get the length of the vector with `Ordering::Acquire`. This ensures that the length is
     /// properly synchronized after it got atomically updated.
     #[inline(always)]
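The safety contract of `push_atomic` leaves the single-writer guarantee entirely to the caller. A minimal sketch of what that looks like in practice, assuming the crate is published as `header-vec` with the `atomic_append` feature enabled, and using only the `with_capacity` and `push_atomic` signatures visible in this diff (the `Mutex` around a unit value stands in for caller-provided synchronization; header type, element type, and capacity are arbitrary):

```rust
use std::sync::Mutex;

use header_vec::HeaderVec;

fn main() {
    // `push_atomic` never reallocates, so all appends must fit into the
    // capacity reserved here.
    let hv: HeaderVec<(), u64> = HeaderVec::with_capacity(8, ());
    // Caller-provided synchronization: at most one appending thread at a time.
    let writer_lock = Mutex::new(());

    let _guard = writer_lock.lock().unwrap();
    // SAFETY: holding the guard guarantees that this is the only thread
    // currently using the atomic append API.
    while unsafe { hv.push_atomic(42) }.is_ok() {}

    // The vector is now full; the item is handed back instead of reallocating.
    assert_eq!(unsafe { hv.push_atomic(99) }, Err(99));
}
```

Because `push_atomic` takes `&self`, the vector itself needs no write lock; only the right to append is serialized, and readers can proceed concurrently.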
@@ -720,48 +765,12 @@ impl<H, T> HeaderVec<H, T> {
         self.header().len.fetch_add(n, Ordering::Release)
     }
 
-    #[inline(always)]
-    pub fn is_empty_atomic_acquire(&self) -> bool {
-        self.len_atomic_acquire() == 0
-    }
-
-    #[inline(always)]
-    pub fn as_slice_atomic_acquire(&self) -> &[T] {
-        unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len_atomic_acquire()) }
-    }
-
     /// Gets the pointer to the end of the slice. This returns a mutable pointer to
     /// uninitialized memory behind the last element.
     #[inline(always)]
     fn end_ptr_atomic_mut(&self) -> *mut T {
         unsafe { self.as_ptr().add(self.len_atomic_acquire()) as *mut T }
     }
-
-    /// Atomically adds an item to the end of the list without reallocation.
-    ///
-    /// # Errors
-    ///
-    /// If the vector is full, the item is returned.
-    ///
-    /// # Safety
-    ///
-    /// There must be only one thread calling this method at any time. Synchronization has to
-    /// be provided by the user.
-    pub unsafe fn push_atomic(&self, item: T) -> Result<(), T> {
-        // relaxed is good enough here because this should be the only thread calling this method.
-        let len = self.len_atomic_relaxed();
-        if len < self.capacity() {
-            unsafe {
-                core::ptr::write(self.end_ptr_atomic_mut(), item);
-            };
-            let len_again = self.len_atomic_add_release(1);
-            // in debug builds we check for races, the chance to catch these are still pretty minimal
-            debug_assert_eq!(len_again, len, "len was updated by another thread");
-            Ok(())
-        } else {
-            Err(item)
-        }
-    }
 }
 
 #[cfg(feature = "atomic_append")]
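The ordering choices in this diff follow the classic single-writer publication idiom: the sole writer may read `len` with `Relaxed` (nobody else modifies it), must write the element before publishing it via a `Release` increment, and readers load `len` with `Acquire` so that observing a length also makes the elements written before it visible. A standalone sketch of that idiom using plain `std` atomics, not the `HeaderVec` API itself (the `Spare` type and all values are illustrative only):

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

// Stand-in for the `HeaderVec` layout: a published length plus spare slots.
struct Spare {
    len: AtomicUsize,
    slots: [UnsafeCell<u64>; 4],
}

// SAFETY (sketch only): a single writer fills each slot before publishing it
// through the release increment of `len`; readers only touch published slots.
unsafe impl Sync for Spare {}

fn main() {
    let shared = Arc::new(Spare {
        len: AtomicUsize::new(0),
        slots: [const { UnsafeCell::new(0) }; 4],
    });

    let w = Arc::clone(&shared);
    let writer = thread::spawn(move || {
        for i in 0..4u64 {
            // Relaxed read: this is the only thread that modifies `len`.
            let len = w.len.load(Ordering::Relaxed);
            // Write the element first ...
            unsafe { *w.slots[len].get() = i * 10 };
            // ... then publish it together with the new length.
            w.len.fetch_add(1, Ordering::Release);
        }
    });

    let r = Arc::clone(&shared);
    let reader = thread::spawn(move || loop {
        // Acquire pairs with the writer's release: any length we observe
        // carries the element writes that preceded it.
        let len = r.len.load(Ordering::Acquire);
        let seen: Vec<u64> = (0..len).map(|i| unsafe { *r.slots[i].get() }).collect();
        assert!(seen.iter().enumerate().all(|(i, v)| *v == i as u64 * 10));
        if len == 4 {
            break;
        }
    });

    writer.join().unwrap();
    reader.join().unwrap();
}
```

This pairing is also why the retained `end_ptr_atomic_mut` reads the length with `len_atomic_acquire` rather than a relaxed load before handing out the write position.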
