Skip to content

Commit 3c2221e

Browse files
committed
add &Collector argument to reclaimers
1 parent 9cd3210 commit 3c2221e

File tree

6 files changed

+62
-50
lines changed

6 files changed

+62
-50
lines changed

docs/GUIDE.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -209,14 +209,17 @@ impl<T> Stack<T> {
209209
If you need to run custom reclamation code, you can write a custom reclaimer.
210210

211211
```rust,ignore
212-
collector.retire(value, |value: *mut Node<T>| unsafe {
212+
collector.retire(value, |value: *mut Node<T>, _collector: &Collector| unsafe {
213213
// Safety: The value was allocated with `Box::new`.
214214
let value = Box::from_raw(value);
215215
println!("Dropping {value}");
216216
drop(value);
217217
});
218218
```
219219

220+
Note that the reclaimer receives a reference to the collector as its second
221+
argument, allowing for recursive reclamation.
222+
220223
[`defer_retire`]:
221224
https://docs.rs/seize/latest/seize/trait.Guard.html#tymethod.defer_retire
222225
[`Guard::protect`]:

src/collector.rs

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ use crate::raw::{self, membarrier, Thread};
22
use crate::{LocalGuard, OwnedGuard};
33

44
use std::fmt;
5-
use std::sync::atomic::{AtomicUsize, Ordering};
65

76
/// A concurrent garbage collector.
87
///
@@ -14,10 +13,8 @@ use std::sync::atomic::{AtomicUsize, Ordering};
1413
/// Every instance of a concurrent data structure should typically own its
1514
/// `Collector`. This allows the garbage collection of non-`'static` values, as
1615
/// memory reclamation is guaranteed to run when the `Collector` is dropped.
16+
#[repr(transparent)]
1717
pub struct Collector {
18-
/// A unique identifier for a collector.
19-
id: usize,
20-
2118
/// The underlying raw collector instance.
2219
pub(crate) raw: raw::Collector,
2320
}
@@ -34,9 +31,6 @@ impl Collector {
3431

3532
/// Creates a new collector.
3633
pub fn new() -> Self {
37-
// A counter for collector IDs.
38-
static ID: AtomicUsize = AtomicUsize::new(0);
39-
4034
// Initialize the `membarrier` module, detecting the presence of
4135
// operating-system strong barrier APIs.
4236
membarrier::detect();
@@ -50,7 +44,6 @@ impl Collector {
5044
let batch_size = cpus.max(Self::DEFAULT_BATCH_SIZE);
5145

5246
Self {
53-
id: ID.fetch_add(1, Ordering::Relaxed),
5447
raw: raw::Collector::new(cpus, batch_size),
5548
}
5649
}
@@ -187,14 +180,16 @@ impl Collector {
187180
/// Alternatively, a custom reclaimer function can be used.
188181
///
189182
/// ```
190-
/// let collector = seize::Collector::new();
183+
/// use seize::Collector;
184+
///
185+
/// let collector = Collector::new();
191186
///
192187
/// // Allocate a value and immediately retire it.
193188
/// let value: *mut usize = Box::into_raw(Box::new(1_usize));
194189
///
195190
/// // Safety: The value was never shared.
196191
/// unsafe {
197-
/// collector.retire(value, |ptr: *mut usize| unsafe {
192+
/// collector.retire(value, |ptr: *mut usize, _collector: &Collector| unsafe {
198193
/// // Safety: The value was allocated with `Box::new`.
199194
/// let value = Box::from_raw(ptr);
200195
/// println!("Dropping {value}");
@@ -203,7 +198,7 @@ impl Collector {
203198
/// }
204199
/// ```
205200
#[inline]
206-
pub unsafe fn retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T)) {
201+
pub unsafe fn retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)) {
207202
debug_assert!(!ptr.is_null(), "attempted to retire a null pointer");
208203

209204
// Note that `add` doesn't ever actually reclaim the pointer immediately if
@@ -236,6 +231,11 @@ impl Collector {
236231
pub unsafe fn reclaim_all(&self) {
237232
unsafe { self.raw.reclaim_all() };
238233
}
234+
235+
// Creates a reference to a `Collector` from an underlying `raw::Collector`.
236+
pub(crate) fn from_raw(raw: &raw::Collector) -> &Collector {
237+
unsafe { &*(raw as *const raw::Collector as *const Collector) }
238+
}
239239
}
240240

241241
impl Eq for Collector {}
@@ -244,7 +244,7 @@ impl PartialEq for Collector {
244244
/// Checks if both references point to the same collector.
245245
#[inline]
246246
fn eq(&self, other: &Self) -> bool {
247-
self.id == other.id
247+
self.raw.id == other.raw.id
248248
}
249249
}
250250

src/guard.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ pub trait Guard {
7676
/// The retired pointer must no longer be accessible to any thread that
7777
/// enters after it is removed. Additionally, the pointer must be valid
7878
/// to pass to the provided reclaimer, once it is safe to reclaim.
79-
unsafe fn defer_retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T));
79+
unsafe fn defer_retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector));
8080
}
8181

8282
/// A guard that keeps the current thread marked as active.
@@ -169,7 +169,7 @@ impl Guard for LocalGuard<'_> {
169169
/// Retires a value, running `reclaim` when no threads hold a reference to
170170
/// it.
171171
#[inline]
172-
unsafe fn defer_retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T)) {
172+
unsafe fn defer_retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)) {
173173
// Safety:
174174
// - `self.thread` is the current thread.
175175
// - The validity of the pointer is guaranteed by the caller.
@@ -292,7 +292,7 @@ impl Guard for OwnedGuard<'_> {
292292
/// Retires a value, running `reclaim` when no threads hold a reference to
293293
/// it.
294294
#[inline]
295-
unsafe fn defer_retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T)) {
295+
unsafe fn defer_retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)) {
296296
// Safety: `self.reservation` is owned by the current thread.
297297
let reservation = unsafe { &*self.reservation };
298298
let _lock = reservation.lock.lock().unwrap();

src/raw/collector.rs

Lines changed: 25 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,9 @@ pub struct Collector {
2727
/// exiting.
2828
reservations: ThreadLocal<CachePadded<Reservation>>,
2929

30+
/// A unique identifier for a collector.
31+
pub(crate) id: usize,
32+
3033
/// The minimum number of nodes required in a batch before attempting
3134
/// retirement.
3235
pub(crate) batch_size: usize,
@@ -36,7 +39,11 @@ impl Collector {
3639
/// Create a collector with the provided batch size and initial thread
3740
/// count.
3841
pub fn new(threads: usize, batch_size: usize) -> Self {
42+
// A counter for collector IDs.
43+
static ID: AtomicUsize = AtomicUsize::new(0);
44+
3945
Self {
46+
id: ID.fetch_add(1, Ordering::Relaxed),
4047
reservations: ThreadLocal::with_capacity(threads),
4148
batches: ThreadLocal::with_capacity(threads),
4249
batch_size: batch_size.next_power_of_two(),
@@ -120,7 +127,7 @@ impl Collector {
120127
atomic::fence(Ordering::Acquire);
121128

122129
// Decrement the reference counts of any batches that were retired.
123-
unsafe { Collector::traverse(head) }
130+
unsafe { self.traverse(head) }
124131
}
125132
}
126133

@@ -138,7 +145,7 @@ impl Collector {
138145

139146
if head != Entry::INACTIVE {
140147
// Decrement the reference counts of any batches that were retired.
141-
unsafe { Collector::traverse(head) }
148+
unsafe { self.traverse(head) }
142149
}
143150
}
144151

@@ -157,7 +164,12 @@ impl Collector {
157164
/// Additionally, current thread must have unique access to the batch for
158165
/// the provided `thread`.
159166
#[inline]
160-
pub unsafe fn add<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T), thread: Thread) {
167+
pub unsafe fn add<T>(
168+
&self,
169+
ptr: *mut T,
170+
reclaim: unsafe fn(*mut T, &crate::Collector),
171+
thread: Thread,
172+
) {
161173
// Safety: The caller guarantees we have unique access to the batch.
162174
let local_batch = unsafe { self.batches.load(thread).get() };
163175

@@ -170,12 +182,13 @@ impl Collector {
170182
// Safety: `LocalBatch::DROP` means we have unique access to the collector.
171183
// Additionally, the caller guarantees that the pointer is valid for the
172184
// provided reclaimer.
173-
unsafe { reclaim(ptr) }
185+
unsafe { reclaim(ptr, crate::Collector::from_raw(self)) }
174186
return;
175187
}
176188

177189
// Safety: `fn(*mut T, &Collector)` and `fn(*mut U, &Collector)` are ABI compatible if `T, U: Sized`.
178-
let reclaim: unsafe fn(*mut ()) = unsafe { std::mem::transmute(reclaim) };
190+
let reclaim: unsafe fn(*mut (), &crate::Collector) =
191+
unsafe { std::mem::transmute(reclaim) };
179192

180193
// Safety: The caller guarantees we have unique access to the batch.
181194
let len = unsafe {
@@ -364,7 +377,7 @@ impl Collector {
364377
// Additionally, the local batch has been reset and we are not holding on to any
365378
// mutable references, so any recursive calls to `retire` during
366379
// reclamation are valid.
367-
unsafe { Collector::free_batch(batch) }
380+
unsafe { self.free_batch(batch) }
368381
}
369382
}
370383

@@ -376,7 +389,7 @@ impl Collector {
376389
/// `list` must be a valid reservation list.
377390
#[cold]
378391
#[inline(never)]
379-
unsafe fn traverse(mut list: *mut Entry) {
392+
unsafe fn traverse(&self, mut list: *mut Entry) {
380393
while !list.is_null() {
381394
let curr = list;
382395

@@ -396,7 +409,7 @@ impl Collector {
396409

397410
// Safety: We have the last reference to the batch and it has been removed from
398411
// our reservation list.
399-
Collector::free_batch(batch)
412+
self.free_batch(batch)
400413
}
401414
}
402415
}
@@ -433,7 +446,7 @@ impl Collector {
433446
// ensured it is non-null. Additionally, the local batch was reset
434447
// above, so the batch is inaccessible through recursive calls to
435448
// `retire`.
436-
unsafe { Collector::free_batch(batch) };
449+
unsafe { self.free_batch(batch) };
437450

438451
// Reset the batch.
439452
//
@@ -453,10 +466,10 @@ impl Collector {
453466
/// access the local batch; the batch being retired must be unreachable
454467
/// through any recursive calls.
455468
#[inline]
456-
unsafe fn free_batch(batch: *mut Batch) {
469+
unsafe fn free_batch(&self, batch: *mut Batch) {
457470
// Safety: We have a unique reference to the batch.
458471
for entry in unsafe { (*batch).entries.iter_mut() } {
459-
unsafe { (entry.reclaim)(entry.ptr.cast()) };
472+
unsafe { (entry.reclaim)(entry.ptr.cast(), crate::Collector::from_raw(self)) };
460473
}
461474

462475
unsafe { LocalBatch::free(batch) };
@@ -530,7 +543,7 @@ struct Entry {
530543
ptr: *mut (),
531544

532545
/// The function used to reclaim the object.
533-
reclaim: unsafe fn(*mut ()),
546+
reclaim: unsafe fn(*mut (), &crate::Collector),
534547

535548
/// The state of the retired object.
536549
state: EntryState,

src/reclaim.rs

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,12 +6,14 @@
66
77
use std::ptr;
88

9+
use crate::Collector;
10+
911
/// Reclaims memory allocated with [`Box`].
1012
///
1113
/// # Safety
1214
///
1315
/// The safety requirements of [`Box::from_raw`] apply.
14-
pub unsafe fn boxed<T>(ptr: *mut T) {
16+
pub unsafe fn boxed<T>(ptr: *mut T, _collector: &Collector) {
1517
unsafe { drop(Box::from_raw(ptr)) }
1618
}
1719

@@ -20,6 +22,6 @@ pub unsafe fn boxed<T>(ptr: *mut T) {
2022
/// # Safety
2123
///
2224
/// The safety requirements of [`ptr::drop_in_place`] apply.
23-
pub unsafe fn in_place<T>(ptr: *mut T) {
25+
pub unsafe fn in_place<T>(ptr: *mut T, _collector: &Collector) {
2426
unsafe { ptr::drop_in_place::<T>(ptr) }
2527
}

tests/lib.rs

Lines changed: 14 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ use seize::{reclaim, Collector, Guard};
33
use std::mem::ManuallyDrop;
44
use std::ptr;
55
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
6-
use std::sync::{mpsc, Arc, Barrier, OnceLock};
6+
use std::sync::{mpsc, Arc, Barrier};
77
use std::thread;
88

99
#[test]
@@ -154,34 +154,33 @@ fn refresh() {
154154

155155
#[test]
156156
fn recursive_retire() {
157-
fn collector() -> &'static Collector {
158-
static COLLECTOR: OnceLock<Collector> = OnceLock::new();
159-
COLLECTOR.get_or_init(|| Collector::new().batch_size(1))
160-
}
161-
162157
struct Recursive {
163158
_value: usize,
164159
pointers: Vec<*mut usize>,
165160
}
166161

162+
let collector = Collector::new().batch_size(1);
163+
167164
let ptr = boxed(Recursive {
168165
_value: 0,
169166
pointers: (0..cfg::ITEMS).map(boxed).collect(),
170167
});
171168

172169
unsafe {
173-
collector().retire(ptr, |link| {
174-
let value = Box::from_raw(link.cast::<Recursive>());
170+
collector.retire(ptr, |ptr: *mut Recursive, collector| {
171+
let value = Box::from_raw(ptr);
172+
175173
for pointer in value.pointers {
176-
collector().retire(pointer, reclaim::boxed);
177-
let mut guard = collector().enter();
174+
collector.retire(pointer, reclaim::boxed);
175+
176+
let mut guard = collector.enter();
178177
guard.flush();
179178
guard.refresh();
180179
drop(guard);
181180
}
182181
});
183182

184-
collector().enter().flush();
183+
collector.enter().flush();
185184
}
186185
}
187186

@@ -209,34 +208,29 @@ fn reclaim_all() {
209208
fn recursive_retire_reclaim_all() {
210209
struct Recursive {
211210
_value: usize,
212-
collector: *mut Collector,
213211
pointers: Vec<*mut DropTrack>,
214212
}
215213

216214
unsafe {
217-
// make sure retire runs in drop, not immediately
218-
let collector = Box::into_raw(Box::new(Collector::new().batch_size(cfg::ITEMS * 2)));
215+
let collector = Collector::new().batch_size(cfg::ITEMS * 2);
219216
let dropped = Arc::new(AtomicUsize::new(0));
220217

221218
let ptr = boxed(Recursive {
222219
_value: 0,
223-
collector,
224220
pointers: (0..cfg::ITEMS)
225221
.map(|_| boxed(DropTrack(dropped.clone())))
226222
.collect(),
227223
});
228224

229-
(*collector).retire(ptr, |link| {
230-
let value = Box::from_raw(link.cast::<Recursive>());
231-
let collector = value.collector;
225+
collector.retire(ptr, |ptr: *mut Recursive, collector| {
226+
let value = Box::from_raw(ptr);
232227
for pointer in value.pointers {
233228
(*collector).retire(pointer, reclaim::boxed);
234229
}
235230
});
236231

237-
(*collector).reclaim_all();
232+
collector.reclaim_all();
238233
assert_eq!(dropped.load(Ordering::Relaxed), cfg::ITEMS);
239-
let _ = Box::from_raw(collector);
240234
}
241235
}
242236

0 commit comments

Comments
 (0)