@@ -12,8 +12,8 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 /// methods.
 ///
 /// Every instance of a concurrent data structure should typically own its
-/// `Collector`. This allows the garbage collection of non-`'static` values,
-/// as memory reclamation is guaranteed to run when the `Collector` is dropped.
+/// `Collector`. This allows the garbage collection of non-`'static` values, as
+/// memory reclamation is guaranteed to run when the `Collector` is dropped.
 pub struct Collector {
     /// A unique identifier for a collector.
     id: usize,
@@ -37,8 +37,8 @@ impl Collector {
         // A counter for collector IDs.
         static ID: AtomicUsize = AtomicUsize::new(0);

-        // Initialize the `membarrier` module, detecting the presence
-        // of operating-system strong barrier APIs.
+        // Initialize the `membarrier` module, detecting the presence of
+        // operating-system strong barrier APIs.
         membarrier::detect();

         let cpus = std::thread::available_parallelism()
@@ -55,48 +55,47 @@ impl Collector {
         }
     }

-    /// Sets the number of objects that must be in a batch
-    /// before reclamation is attempted.
+    /// Sets the number of objects that must be in a batch before reclamation is
+    /// attempted.
     ///
-    /// Retired objects are added to thread-local *batches*
-    /// before starting the reclamation process. After
-    /// `batch_size` is hit, the objects are moved to separate
-    /// *retirement lists*, where reference counting kicks
-    /// in and batches are eventually reclaimed.
+    /// Retired objects are added to thread-local *batches* before starting the
+    /// reclamation process. After `batch_size` is hit, the objects are moved to
+    /// separate *retirement lists*, where reference counting kicks in and
+    /// batches are eventually reclaimed.
     ///
-    /// A larger batch size amortizes the cost of retirement.
-    /// However, reclamation latency can also grow due to the
-    /// large number of objects needed to be freed. Note that
-    /// reclamation can not be attempted unless the batch contains
-    /// at least as many objects as the number of active threads.
+    /// A larger batch size amortizes the cost of retirement. However,
+    /// reclamation latency can also grow due to the large number of objects
+    /// needed to be freed. Note that reclamation can not be attempted
+    /// unless the batch contains at least as many objects as the number of
+    /// active threads.
     ///
     /// The default batch size is `32`.
     pub fn batch_size(mut self, batch_size: usize) -> Self {
         self.raw.batch_size = batch_size;
         self
     }
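For context (not part of the diff), a minimal sketch of the builder above, using only the public `Collector` API:

```rust
use seize::Collector;

fn main() {
    // Batches of 64 amortize retirement cost further than the default of
    // 32, at the price of higher reclamation latency.
    let collector = Collector::new().batch_size(64);
    let _ = collector;
}
```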

-    /// Marks the current thread as active, returning a guard
-    /// that protects loads of concurrent objects for its lifetime.
-    /// The thread will be marked as inactive when the guard is dropped.
+    /// Marks the current thread as active, returning a guard that protects
+    /// loads of concurrent objects for its lifetime. The thread will be
+    /// marked as inactive when the guard is dropped.
     ///
-    /// Note that loads of objects that may be retired must be paired with
-    /// the [`seize::protect`](crate::protect) function to be protected.
-    /// See [the guide](crate::guide#starting-operations) for an introduction
-    /// to using guards, or the documentation of [`LocalGuard`] for more details.
+    /// Note that loads of objects that may be retired must be paired with the
+    /// [`seize::protect`](crate::protect) function to be protected. See [the
+    /// guide](crate::guide#starting-operations) for an introduction to using
+    /// guards, or the documentation of [`LocalGuard`] for more details.
     ///
-    /// Note that `enter` is reentrant, and it is legal to create multiple guards
-    /// on the same thread. The thread will stay marked as active until the last
-    /// guard is dropped.
+    /// Note that `enter` is reentrant, and it is legal to create multiple
+    /// guards on the same thread. The thread will stay marked as active
+    /// until the last guard is dropped.
     ///
     /// # Performance
     ///
-    /// Performance-wise, creating and destroying a `LocalGuard` is about the same as locking and
-    /// unlocking an uncontended `Mutex`. Because of this,
-    /// guards should be reused across multiple operations if possible.
-    /// However, holding a guard prevents the reclamation of any concurrent
-    /// objects retired during its lifetime, so there is a tradeoff between
-    /// performance and memory usage.
+    /// Performance-wise, creating and destroying a `LocalGuard` is about the
+    /// same as locking and unlocking an uncontended `Mutex`. Because of
+    /// this, guards should be reused across multiple operations if
+    /// possible. However, holding a guard prevents the reclamation of any
+    /// concurrent objects retired during its lifetime, so there is
+    /// a tradeoff between performance and memory usage.
     ///
     /// # Examples
     ///
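A hedged sketch of the guard lifecycle described in this hunk (the method's own `# Examples` section is elided from the diff):

```rust
use seize::Collector;

fn main() {
    let collector = Collector::new();

    // Reuse one guard across several operations rather than creating one
    // per load; creating a guard costs about an uncontended `Mutex` lock.
    let guard = collector.enter();
    // ... perform protected loads here ...

    // `enter` is reentrant: a second guard on the same thread is legal.
    let inner = collector.enter();
    drop(inner);

    // The thread stays marked as active until the last guard is dropped.
    drop(guard);
}
```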
@@ -129,10 +128,10 @@ impl Collector {

     /// Create an owned guard that protects objects for its lifetime.
     ///
-    /// Unlike local guards created with [`enter`](Collector::enter),
-    /// owned guards are independent of the current thread, allowing
-    /// them to implement `Send` and `Sync`. See the documentation of
-    /// [`OwnedGuard`] for more details.
+    /// Unlike local guards created with [`enter`](Collector::enter), owned
+    /// guards are independent of the current thread, allowing them to
+    /// implement `Send` and `Sync`. See the documentation of [`OwnedGuard`]
+    /// for more details.
     #[inline]
     pub fn enter_owned(&self) -> OwnedGuard<'_> {
         OwnedGuard::enter(self)
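To illustrate why `Send` and `Sync` matter here, a sketch that shares an owned guard with another thread (assuming `OwnedGuard` implements both, as documented):

```rust
use seize::Collector;

fn main() {
    let collector = Collector::new();

    // An owned guard is not tied to the thread that created it, so it
    // can be shared with (or sent to) a scoped worker thread.
    let guard = collector.enter_owned();
    std::thread::scope(|s| {
        s.spawn(|| {
            // ... perform protected loads through `&guard` here ...
            let _ = &guard;
        });
    });
}
```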
@@ -143,8 +142,8 @@ impl Collector {
     ///
     /// Note that this method is disconnected from any guards on the current
     /// thread, so the pointer may be reclaimed immediately. Use
-    /// [`Guard::defer_retire`](crate::Guard::defer_retire) if the pointer
-    /// may still be accessed by the current thread while the guard is active.
+    /// [`Guard::defer_retire`](crate::Guard::defer_retire) if the pointer may
+    /// still be accessed by the current thread while the guard is active.
     ///
     /// # Safety
     ///
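For context, the typical call pattern for this retire method (a sketch assuming the crate's `reclaim::boxed` helper, which frees the allocation as a `Box`):

```rust
use seize::{reclaim, Collector};

fn main() {
    let collector = Collector::new();
    let ptr = Box::into_raw(Box::new(42_u64));

    // SAFETY: `ptr` is no longer reachable from any shared location, so
    // no thread can newly acquire a reference to it after this call.
    unsafe { collector.retire(ptr, reclaim::boxed) };
}
```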
@@ -224,13 +223,14 @@ impl Collector {
     /// threads are currently active, whether accessing values that have
     /// been retired or accessing the collector through any type of guard.
     /// This is akin to having a unique reference to the collector. However,
-    /// this method takes a shared reference, as reclaimers to be run by this
-    /// thread are allowed to access the collector recursively.
+    /// this method takes a shared reference, as reclaimers to
+    /// be run by this thread are allowed to access the collector recursively.
     ///
     /// # Notes
     ///
     /// Note that if reclaimers initialize guards across threads, or initialize
-    /// owned guards, objects retired through those guards may not be reclaimed.
+    /// owned guards, objects retired through those guards may not be
+    /// reclaimed.
     pub unsafe fn reclaim_all(&self) {
         unsafe { self.raw.reclaim_all() };
     }
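A hedged sketch of the intended call pattern for `reclaim_all`:

```rust
use seize::Collector;

fn main() {
    let collector = Collector::new();
    // ... all guards have been dropped, and no thread still holds
    // references to values retired through this collector ...

    // SAFETY: no threads are active in the collector at this point,
    // which is akin to holding a unique reference to it.
    unsafe { collector.reclaim_all() };
}
```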
@@ -246,19 +246,19 @@ impl PartialEq for Collector {
     }
 }

-/// Strengthens an ordering to that necessary to protect the load of a concurrent
-/// object.
+/// Strengthens an ordering to that necessary to protect the load of a
+/// concurrent object.
 ///
 /// If this function is paired with the load of an object while a guard is
-/// active on the current thread, the value of that object is guaranteed to
-/// stay valid until the guard is dropped or the object is retired by the
-/// current thread. Importantly, if another thread retires this object, it will
-/// not be reclaimed for the lifetime of the guard.
+/// active on the current thread, the value of that object is guaranteed to stay
+/// valid until the guard is dropped or the object is retired by the current
+/// thread. Importantly, if another thread retires this object, it will not be
+/// reclaimed for the lifetime of the guard.
 ///
 /// Although this function returns an ordering, it must be *paired* with a given
 /// load for that load to be considered protected. That is, this function must
-/// execute before that load. This can be achieved by simply calling this function
-/// directly on the load ordering:
+/// execute before that load. This can be achieved by simply calling this
+/// function directly on the load ordering:
 ///
 /// ```ignore
 /// let value = ptr.load(seize::protect(Ordering::Acquire));
@@ -270,11 +270,11 @@ impl PartialEq for Collector {
 /// let value = ptr.swap(ptr::null_mut(), seize::protect(Ordering::AcqRel));
 /// ```
 ///
-/// Note that the lifetime of a guarded pointer is logically tied to that of
-/// the guard — when the guard is dropped the pointer is invalidated. However,
+/// Note that the lifetime of a guarded pointer is logically tied to that of the
+/// guard — when the guard is dropped the pointer is invalidated. However,
 /// managing the lifetime of any references is up to the caller. Data structures
-/// that return shared references should ensure that the lifetime of the references
-/// are tied to the lifetime of a guard.
+/// that return shared references should ensure that the lifetime of the
+/// references are tied to the lifetime of a guard.
 pub fn protect(ordering: Ordering) -> Ordering {
     raw::Collector::protect(ordering)
 }
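Tying the pieces of this doc comment together, a self-contained sketch of a protected load (the guard scope and the final cleanup are illustrative assumptions):

```rust
use std::sync::atomic::{AtomicPtr, Ordering};
use seize::Collector;

fn main() {
    let collector = Collector::new();
    let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_u32)));

    {
        // The load is protected only while a guard is active *and* the
        // ordering has been strengthened by `seize::protect`.
        let _guard = collector.enter();
        let value = ptr.load(seize::protect(Ordering::Acquire));
        // SAFETY: `value` stays valid until `_guard` is dropped or the
        // current thread retires it.
        unsafe { println!("{}", *value) };
    }

    // Nothing retired the allocation in this sketch, so free it directly.
    unsafe { drop(Box::from_raw(ptr.into_inner())) };
}
```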