Skip to content

Commit 257d6f0

Browse files
committed
Use compare_exchange instead of compare_and_swap
Apparently CAS is deprecated :o
1 parent 595b523 commit 257d6f0

File tree

2 files changed

+101
-54
lines changed

2 files changed

+101
-54
lines changed

libs/context/src/handle.rs

Lines changed: 92 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,10 @@ impl<C: RawHandleImpl> GcHandleList<C> {
4545
last_free_slot: AtomicPtr::new(null_mut()),
4646
}
4747
}
48+
/// Append the specified slot to this list
49+
///
50+
/// The specified slot must be logically owned
51+
/// and not already part of this list
4852
unsafe fn append_free_slot(&self, slot: *mut HandleSlot<C>) {
4953
// Verify it's actually free...
5054
debug_assert_eq!(
@@ -63,14 +67,27 @@ impl<C: RawHandleImpl> GcHandleList<C> {
6367
*/
6468
(*slot).freed.prev_free_slot
6569
.store(last_free, Ordering::Release);
66-
let actual_last_free = self.last_free_slot.compare_and_swap(
70+
/*
71+
* We really don't want surprise failures because we're going to
72+
* have to redo the above store if that happens.
73+
* Likewise we want acquire ordering so we don't fail unnecessarily
74+
* on retry.
75+
* In theory this is premature optimization, but I really want to
76+
* make this as straightforward as possible.
77+
* Maybe we should look into the efficiency of this on ARM?
78+
*/
79+
match self.last_free_slot.compare_exchange(
6780
last_free, slot,
68-
Ordering::AcqRel
69-
);
70-
if actual_last_free == last_free {
71-
return // Success
72-
} else {
73-
last_free = actual_last_free;
81+
Ordering::AcqRel,
82+
Ordering::Acquire
83+
) {
84+
Ok(actual) => {
85+
debug_assert_eq!(actual, last_free);
86+
return; // Success
87+
},
88+
Err(actual) => {
89+
last_free = actual;
90+
}
7491
}
7592
}
7693
}
@@ -96,29 +113,36 @@ impl<C: RawHandleImpl> GcHandleList<C> {
96113
* If this CAS succeeds, we have ownership.
97114
* Otherwise another thread beat us and
98115
* we must try again.
116+
*
117+
* Avoid relaxed ordering and compare_exchange_weak
118+
* to make this straightforward.
99119
*/
100-
let actual_slot = self.last_free_slot.compare_and_swap(
120+
match self.last_free_slot.compare_exchange(
101121
slot, prev,
102-
Ordering::AcqRel
103-
);
104-
if actual_slot == slot {
105-
// Verify it's actually free...
106-
debug_assert_eq!(
122+
Ordering::AcqRel,
123+
Ordering::Acquire
124+
) {
125+
Ok(actual_slot) => {
126+
debug_assert_eq!(actual_slot, slot);
127+
// Verify it's actually free...
128+
debug_assert_eq!(
129+
(*slot).valid.value
130+
.load(Ordering::SeqCst),
131+
std::ptr::null_mut()
132+
);
133+
/*
134+
* We own the slot, initialize it to point to
135+
* the provided pointer. The user is responsible
136+
* for any remaining initialization.
137+
*/
107138
(*slot).valid.value
108-
.load(Ordering::SeqCst),
109-
std::ptr::null_mut()
110-
);
111-
/*
112-
* We own the slot, initialize it to point to
113-
* the provided pointer. The user is responsible
114-
* for any remaining initialization.
115-
*/
116-
(*slot).valid.value
117-
.store(value, Ordering::Release);
118-
return &(*slot).valid;
119-
} else {
120-
// Try again
121-
slot = actual_slot;
139+
.store(value, Ordering::Release);
140+
return &(*slot).valid;
141+
},
142+
Err(actual_slot) => {
143+
// Try again
144+
slot = actual_slot;
145+
}
122146
}
123147
}
124148
// Empty free list
@@ -173,20 +197,25 @@ impl<C: RawHandleImpl> GcHandleList<C> {
173197
last_alloc: AtomicUsize::new(0),
174198
prev: AtomicPtr::new(prev_bucket),
175199
}));
176-
let actual_bucket = self.last_bucket.compare_and_swap(
177-
prev_bucket, allocated_bucket, Ordering::SeqCst
178-
);
179-
if actual_bucket == prev_bucket {
180-
Ok(&*actual_bucket)
181-
} else {
182-
/*
183-
* Someone else beat us too creating the bucket.
184-
*
185-
* Free the bucket we've created and return
186-
* their bucket
187-
*/
188-
drop(Box::from_raw(allocated_bucket));
189-
Err(&*actual_bucket)
200+
match self.last_bucket.compare_exchange(
201+
prev_bucket, allocated_bucket,
202+
Ordering::SeqCst,
203+
Ordering::SeqCst
204+
) {
205+
Ok(actual_bucket) => {
206+
assert_eq!(actual_bucket, prev_bucket);
207+
Ok(&*actual_bucket)
208+
},
209+
Err(actual_bucket) => {
210+
/*
211+
* Someone else beat us to creating the bucket.
212+
*
213+
* Free the bucket we've created and return
214+
* their bucket
215+
*/
216+
drop(Box::from_raw(allocated_bucket));
217+
Err(&*actual_bucket)
218+
}
190219
}
191220
}
192221
/// Trace the [GcHandle] using the specified closure.
@@ -267,10 +296,11 @@ impl<C: RawHandleImpl> GcHandleBucket<C> {
267296
for (i, slot) in self.slots.iter().enumerate()
268297
.skip(last_alloc) {
269298
// TODO: All these fences must be horrible on ARM
270-
if slot.valid.value.compare_and_swap(
299+
if slot.valid.value.compare_exchange(
271300
std::ptr::null_mut(), value,
272-
Ordering::AcqRel
273-
).is_null() {
301+
Ordering::AcqRel,
302+
Ordering::Relaxed
303+
).is_ok() {
274304
// We acquired ownership!
275305
self.last_alloc.fetch_max(i, Ordering::AcqRel);
276306
return Some(&*slot);
@@ -469,16 +499,30 @@ impl<T: GcSafe, C: RawHandleImpl> Clone for GcHandle<T, C> {
469499
.is_null(),
470500
"Pointer is invalid"
471501
);
502+
let mut old_refcnt = inner.refcnt.load(Ordering::Relaxed);
472503
loop {
473-
let old_refcnt = inner.refcnt.load(Ordering::Relaxed);
474504
assert_ne!(
475505
old_refcnt, isize::max_value() as usize,
476506
"Reference count overflow"
477507
);
478-
if inner.refcnt.compare_and_swap(
508+
/*
509+
* NOTE: Relaxed is sufficient for failure since we have no
510+
* expectations about the new state. Weak exchange is okay
511+
* since we retry in a loop.
512+
*
513+
* NOTE: We do **not** use fetch_add because we are afraid
514+
* of refcount overflow. We should possibly consider it
515+
*/
516+
match inner.refcnt.compare_exchange_weak(
479517
old_refcnt, old_refcnt + 1,
480-
Ordering::AcqRel
481-
) == old_refcnt { break };
518+
Ordering::AcqRel,
519+
Ordering::Relaxed
520+
) {
521+
Ok(_) => break,
522+
Err(val) => {
523+
old_refcnt = val;
524+
}
525+
}
482526
}
483527
GcHandle {
484528
inner: self.inner,

libs/context/src/state/sync.rs

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -188,9 +188,11 @@ unsafe impl<C> super::RawContext<C> for RawContext<C>
188188
let state = &mut *guard;
189189
// If there is not a active `PendingCollection` - create one
190190
if state.pending.is_none() {
191-
assert!(!collector.manager().collecting.compare_and_swap(
192-
false, true, Ordering::SeqCst
193-
));
191+
assert_eq!(collector.manager().collecting.compare_exchange(
192+
false, true,
193+
Ordering::SeqCst,
194+
Ordering::Relaxed, // Failure is a panic either way -_-
195+
), Ok(false));
194196
let id = state.next_pending_id();
195197
#[allow(clippy::mutable_key_type)] // Used for debugging (see below)
196198
let known_contexts = state.known_contexts.get_mut();
@@ -572,10 +574,11 @@ pub(crate) trait SyncCollectorImpl: RawCollectorImpl<Manager=CollectionManager<S
572574
};
573575
if waiting_contexts == 0 {
574576
*pending_ref = None;
575-
assert!(self.manager().collecting.compare_and_swap(
577+
assert_eq!(self.manager().collecting.compare_exchange(
576578
true, false,
577-
Ordering::SeqCst
578-
));
579+
Ordering::SeqCst,
580+
Ordering::Relaxed, // Failure is catastrophic
581+
), Ok(true));
579582
// Someone may be waiting for us to become `None`
580583
self.manager().collection_wait.notify_all();
581584
}

0 commit comments

Comments
 (0)