@@ -4,128 +4,135 @@ use crate::{LocalGuard, OwnedGuard};
 use std::fmt;
 use std::sync::atomic::{AtomicUsize, Ordering};
 
-/// Fast, efficient, and robust memory reclamation.
+/// A concurrent garbage collector.
 ///
-/// A `Collector` manages the allocation and retirement of concurrent objects.
+/// A `Collector` manages the access and retirement of concurrent objects.
 /// Objects can be safely loaded through *guards*, which can be created using
 /// the [`enter`](Collector::enter) or [`enter_owned`](Collector::enter_owned)
 /// methods.
+///
+/// Every instance of a concurrent data structure should typically own its
+/// `Collector`. This allows the garbage collection of non-`'static` values, as
+/// memory reclamation is guaranteed to run when the `Collector` is dropped.
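+///
+/// For example, a concurrent data structure can embed its own collector. This
+/// is only a sketch; `Stack` is a hypothetical type, not part of this crate:
+///
+/// ```rust
+/// use std::sync::atomic::AtomicPtr;
+/// use seize::Collector;
+///
+/// // A hypothetical data structure that owns the collector
+/// // used to reclaim its nodes.
+/// struct Stack<T> {
+///     head: AtomicPtr<T>,
+///     collector: Collector,
+/// }
+/// ```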
 pub struct Collector {
+    /// A unique identifier for a collector.
     id: usize,
+
+    /// The underlying raw collector instance.
     pub(crate) raw: raw::Collector,
 }
 
-unsafe impl Send for Collector {}
-unsafe impl Sync for Collector {}
+impl Default for Collector {
+    fn default() -> Self {
+        Self::new()
+    }
+}
 
 impl Collector {
+    /// The default batch size for a new collector.
     const DEFAULT_BATCH_SIZE: usize = 32;
 
     /// Creates a new collector.
     pub fn new() -> Self {
+        // A counter for collector IDs.
         static ID: AtomicUsize = AtomicUsize::new(0);
 
+        // Initialize the `membarrier` module, detecting the presence of
+        // operating-system strong barrier APIs.
         membarrier::detect();
+
         let cpus = std::thread::available_parallelism()
             .map(Into::into)
             .unwrap_or(1);
 
+        // Ensure every batch accumulates at least as many entries
+        // as there are threads on the system.
        let batch_size = cpus.max(Self::DEFAULT_BATCH_SIZE);
 
         Self {
-            raw: raw::Collector::new(cpus, batch_size),
             id: ID.fetch_add(1, Ordering::Relaxed),
+            raw: raw::Collector::new(cpus, batch_size),
         }
     }
 
-    /// Sets the number of values that must be in a batch
-    /// before reclamation is attempted.
-    ///
-    /// Retired values are added to thread-local *batches*
-    /// before starting the reclamation process. After
-    /// `batch_size` is hit, values are moved to separate
-    /// *retirement lists*, where reference counting kicks
-    /// in and batches are eventually reclaimed.
+    /// Sets the number of objects that must be in a batch before reclamation is
+    /// attempted.
     ///
-    /// A larger batch size means that deallocation is done
-    /// less frequently, but reclamation also becomes more
-    /// expensive due to longer retirement lists needing
-    /// to be traversed and freed.
+    /// Retired objects are added to thread-local *batches* before starting the
+    /// reclamation process. After `batch_size` is hit, the objects are moved to
+    /// separate *retirement lists*, where reference counting kicks in and
+    /// batches are eventually reclaimed.
     ///
-    /// Note that batch sizes should generally be larger
-    /// than the number of threads accessing objects.
+    /// A larger batch size amortizes the cost of retirement. However,
+    /// reclamation latency can also grow due to the larger number of objects
+    /// that need to be freed. Note that reclamation cannot be attempted
+    /// unless the batch contains at least as many objects as the number of
+    /// active threads.
     ///
     /// The default batch size is `32`.
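+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of configuring a larger batch size at construction:
+    ///
+    /// ```rust
+    /// use seize::Collector;
+    ///
+    /// // Accumulate at least 64 retired objects per batch before
+    /// // attempting reclamation.
+    /// let collector = Collector::new().batch_size(64);
+    /// ```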
     pub fn batch_size(mut self, batch_size: usize) -> Self {
         self.raw.batch_size = batch_size;
         self
     }
 
-    /// Marks the current thread as active, returning a guard
-    /// that allows protecting loads of concurrent objects. The thread
-    /// will be marked as inactive when the guard is dropped.
+    /// Marks the current thread as active, returning a guard that protects
+    /// loads of concurrent objects for its lifetime. The thread will be
+    /// marked as inactive when the guard is dropped.
     ///
-    /// See [the guide](crate::guide#starting-operations) for an
-    /// introduction to using guards, or the documentation of [`LocalGuard`]
-    /// for more details.
+    /// Note that loads of objects that may be retired must be protected with
+    /// [`Guard::protect`]. See [the guide](crate::guide#starting-operations) for
+    /// an introduction to using guards, or the documentation of [`LocalGuard`] for
+    /// more details.
     ///
-    /// # Performance
+    /// Note that `enter` is reentrant, and it is legal to create multiple
+    /// guards on the same thread. The thread will stay marked as active
+    /// until the last guard is dropped.
     ///
-    /// Creating and destroying a guard is about the same as locking and
-    /// unlocking an uncontended `Mutex`, performance-wise. Because of this,
-    /// guards should be re-used across multiple operations if possible.
-    /// However, note that holding a guard prevents the reclamation of any
-    /// concurrent objects retired during it's lifetime, so there is a
-    /// tradeoff between performance and memory usage.
+    /// [`Guard::protect`]: crate::Guard::protect
     ///
-    /// # Examples
-    ///
-    /// ```rust
-    /// # use std::sync::atomic::{AtomicPtr, Ordering};
-    /// # let collector = seize::Collector::new();
-    /// use seize::{reclaim, Linked, Guard};
+    /// # Performance
     ///
-    /// let ptr = AtomicPtr::new(collector.link_boxed(1_usize));
+    /// Performance-wise, creating and destroying a `LocalGuard` is about the
+    /// same as locking and unlocking an uncontended `Mutex`. Because of
+    /// this, guards should be reused across multiple operations if
+    /// possible. However, holding a guard prevents the reclamation of any
+    /// concurrent objects retired during its lifetime, so there is
+    /// a tradeoff between performance and memory usage.
     ///
-    /// let guard = collector.enter();
-    /// let value = guard.protect(&ptr, Ordering::Acquire);
-    /// unsafe { assert_eq!(**value, 1) }
-    /// # unsafe { guard.defer_retire(value, reclaim::boxed) };
-    /// ```
-    ///
-    /// Note that `enter` is reentrant, and it is legal to create
-    /// multiple guards on the same thread. The thread will stay
-    /// marked as active until the last guard is dropped:
+    /// # Examples
     ///
     /// ```rust
     /// # use std::sync::atomic::{AtomicPtr, Ordering};
+    /// use seize::Guard;
     /// # let collector = seize::Collector::new();
-    /// use seize::{reclaim, Linked, Guard};
+    ///
+    /// // An atomic object.
+    /// let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize)));
     ///
-    /// let ptr = AtomicPtr::new(collector.link_boxed(1_usize));
+    /// {
+    ///     // Create a guard that is active for this scope.
+    ///     let guard = collector.enter();
     ///
-    /// let guard1 = collector.enter();
-    /// let guard2 = collector.enter();
+    ///     // Read the object using a protected load.
+    ///     let value = guard.protect(&ptr, Ordering::Acquire);
+    ///     unsafe { assert_eq!(*value, 1) }
     ///
-    /// let value = guard2.protect(&ptr, Ordering::Acquire);
-    /// drop(guard1);
-    /// // the first guard is dropped, but `value`
-    /// // is still safe to access as a guard still
-    /// // exists
-    /// unsafe { assert_eq!(**value, 1) }
-    /// # unsafe { guard2.defer_retire(value, reclaim::boxed) };
-    /// drop(guard2) // _now_, the thread is marked as inactive
+    ///     // If there are other threads that may retire the object,
+    ///     // the pointer is no longer valid after the guard is dropped.
+    ///     drop(guard);
+    /// }
+    /// # unsafe { drop(Box::from_raw(ptr.load(Ordering::Relaxed))) };
     /// ```
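+    ///
+    /// A sketch of the reentrancy described above: nested guards on the same
+    /// thread keep it marked as active until the last one is dropped.
+    ///
+    /// ```rust
+    /// # use std::sync::atomic::{AtomicPtr, Ordering};
+    /// # use seize::Guard;
+    /// # let collector = seize::Collector::new();
+    /// # let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize)));
+    /// let guard1 = collector.enter();
+    /// let guard2 = collector.enter();
+    ///
+    /// let value = guard2.protect(&ptr, Ordering::Acquire);
+    ///
+    /// // The first guard is dropped, but the thread is still marked as
+    /// // active, so `value` remains safe to access.
+    /// drop(guard1);
+    /// unsafe { assert_eq!(*value, 1) }
+    ///
+    /// // Now the thread is marked as inactive.
+    /// drop(guard2);
+    /// # unsafe { drop(Box::from_raw(ptr.load(Ordering::Relaxed))) };
+    /// ```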
     #[inline]
     pub fn enter(&self) -> LocalGuard<'_> {
         LocalGuard::enter(self)
     }
 
-    /// Create an owned guard that protects objects for it's lifetime.
+    /// Create an owned guard that protects objects for its lifetime.
     ///
-    /// Unlike local guards created with [`enter`](Collector::enter),
-    /// owned guards are independent of the current thread, allowing
-    /// them to implement `Send`. See the documentation of [`OwnedGuard`]
+    /// Unlike local guards created with [`enter`](Collector::enter), owned
+    /// guards are independent of the current thread, allowing them to
+    /// implement `Send` and `Sync`. See the documentation of [`OwnedGuard`]
     /// for more details.
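+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of sharing an owned guard with scoped threads:
+    ///
+    /// ```rust
+    /// let collector = seize::Collector::new();
+    ///
+    /// // An owned guard is `Send` and `Sync`, so it can be shared
+    /// // across threads.
+    /// let guard = collector.enter_owned();
+    ///
+    /// std::thread::scope(|s| {
+    ///     s.spawn(|| {
+    ///         // Use the guard from another thread.
+    ///         let _guard = &guard;
+    ///     });
+    /// });
+    /// ```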
     #[inline]
     pub fn enter_owned(&self) -> OwnedGuard<'_> {
@@ -137,21 +144,17 @@ impl Collector {
     ///
     /// Note that this method is disconnected from any guards on the current
     /// thread, so the pointer may be reclaimed immediately. Use
-    /// [`Guard::defer_retire`](crate::Guard::defer_retire) if the pointer
-    /// may still be accessed by the current thread.
+    /// [`Guard::defer_retire`](crate::Guard::defer_retire) if the pointer may
+    /// still be accessed by the current thread while the guard is active.
     ///
     /// # Safety
     ///
-    /// The retired object must no longer be accessible to any thread that
+    /// The retired pointer must no longer be accessible to any thread that
     /// enters after it is removed. It also cannot be accessed by the
     /// current thread after `retire` is called.
     ///
-    /// Retiring the same pointer twice can cause **undefined behavior**, even
-    /// if the reclaimer doesn't free memory.
-    ///
-    /// Additionally, the pointer must be valid to access as a [`Link`], per the
-    /// [`AsLink`] trait, and the reclaimer passed to `retire` must
-    /// correctly free values of type `T`.
+    /// Additionally, the pointer must be valid to pass to the provided
+    /// reclaimer, once it is safe to reclaim.
     ///
     /// # Examples
     ///
@@ -161,42 +164,47 @@ impl Collector {
     /// ```
     /// # use std::sync::atomic::{AtomicPtr, Ordering};
     /// # let collector = seize::Collector::new();
-    /// use seize::{reclaim, Linked};
+    /// use seize::reclaim;
     ///
-    /// let ptr = AtomicPtr::new(collector.link_boxed(1_usize));
+    /// // An atomic object.
+    /// let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize)));
     ///
+    /// // Create a guard.
     /// let guard = collector.enter();
-    /// // store the new value
-    /// let old = ptr.swap(collector.link_boxed(2_usize), Ordering::Release);
-    /// // reclaim the old value
-    /// // safety: the `swap` above made the old value unreachable for any new threads
+    ///
+    /// // Store a new value.
+    /// let old = ptr.swap(Box::into_raw(Box::new(2_usize)), Ordering::Release);
+    ///
+    /// // Reclaim the old value.
+    /// //
+    /// // Safety: The `swap` above made the old value unreachable for any new threads.
+    /// // Additionally, the old value was allocated with a `Box`, so `reclaim::boxed`
+    /// // is valid.
     /// unsafe { collector.retire(old, reclaim::boxed) };
     /// # unsafe { collector.retire(ptr.load(Ordering::Relaxed), reclaim::boxed) };
     /// ```
     ///
-    /// Alternative, a custom reclaimer function can be used:
+    /// Alternatively, a custom reclaimer function can be used.
     ///
     /// ```
-    /// # use seize::{Link, Collector, Linked};
-    /// # let collector = Collector::new();
-    /// let value = collector.link_boxed(1);
+    /// let collector = seize::Collector::new();
     ///
-    /// // safety: the value was never shared
-    /// unsafe {
-    ///     collector.retire(value, |link: *mut Link| unsafe {
-    ///         // safety: the value retired was of type *mut Linked<i32>
-    ///         let ptr: *mut Linked<i32> = Link::cast(link);
+    /// // Allocate a value and immediately retire it.
+    /// let value: *mut usize = Box::into_raw(Box::new(1_usize));
     ///
-    ///         // safety: the value was allocated with `link_boxed`
+    /// // Safety: The value was never shared.
+    /// unsafe {
+    ///     collector.retire(value, |ptr: *mut usize| unsafe {
+    ///         // Safety: The value was allocated with `Box::new`.
     ///         let value = Box::from_raw(ptr);
-    ///         println!("dropping {}", value);
+    ///         println!("Dropping {value}");
     ///         drop(value);
     ///     });
     /// }
     /// ```
     #[inline]
     pub unsafe fn retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T)) {
-        debug_assert!(!ptr.is_null(), "attempted to retire null pointer");
+        debug_assert!(!ptr.is_null(), "attempted to retire a null pointer");
 
         // Note that `add` doesn't ever actually reclaim the pointer immediately if
         // the current thread is active. Instead, it adds it to the current thread's
@@ -217,8 +225,8 @@ impl Collector {
     /// threads are currently active, whether accessing values that have
     /// been retired or accessing the collector through any type of guard.
     /// This is akin to having a unique reference to the collector. However,
-    /// this method takes a shared reference, as reclaimers to be run by this
-    /// thread are allowed to access the collector recursively.
+    /// this method takes a shared reference, as reclaimers to
+    /// be run by this thread are allowed to access the collector recursively.
     ///
     /// # Notes
     ///
@@ -240,12 +248,6 @@ impl PartialEq for Collector {
     }
 }
 
-impl Default for Collector {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
 impl fmt::Debug for Collector {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("Collector")