
Commit c3f8f63

Fix Arc::downgrade locking logic
1 parent aa5d7af commit c3f8f63

File tree

3 files changed: +153, -85 lines

  library/alloc/src/raw_rc.rs
  library/alloc/src/rc.rs
  library/alloc/src/sync.rs

library/alloc/src/raw_rc.rs

Lines changed: 19 additions & 20 deletions
@@ -27,11 +27,18 @@ use crate::string::String;
 use crate::vec::Vec;
 
 pub unsafe trait RcOps {
-    unsafe fn inc_ref(count: &UnsafeCell<usize>);
-    unsafe fn dec_ref(count: &UnsafeCell<usize>) -> bool;
+    unsafe fn inc_strong(strong_count: &UnsafeCell<usize>);
+    unsafe fn dec_strong(strong_count: &UnsafeCell<usize>) -> bool;
+
+    unsafe fn inc_weak(weak_count: &UnsafeCell<usize>);
+    unsafe fn dec_weak(weak_count: &UnsafeCell<usize>) -> bool;
+
     unsafe fn upgrade(strong_count: &UnsafeCell<usize>) -> bool;
+    unsafe fn downgrade(weak_count: &UnsafeCell<usize>);
+
     unsafe fn lock_strong_count(strong_count: &UnsafeCell<usize>) -> bool;
     unsafe fn unlock_strong_count(strong_count: &UnsafeCell<usize>);
+
     unsafe fn is_unique(strong_count: &UnsafeCell<usize>, weak_count: &UnsafeCell<usize>) -> bool;
 
     #[cfg(not(no_global_oom_handling))]
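Note: the trait now separates strong-count from weak-count bookkeeping (`inc_strong`/`dec_strong` vs. `inc_weak`/`dec_weak`) and grows a dedicated `downgrade` hook, so each backend decides how a new `Weak` acquires its slot in the weak count. As a rough illustration only (the `DemoRcOps` trait and `DemoLocalOps` backend below are invented for this note, not library code), a single-threaded backend can satisfy such an interface with plain non-atomic updates:

use std::cell::UnsafeCell;

// Invented, simplified analogue of the split trait: separate strong/weak hooks
// plus a dedicated `downgrade` entry point.
unsafe trait DemoRcOps {
    unsafe fn inc_strong(strong_count: &UnsafeCell<usize>);
    unsafe fn dec_strong(strong_count: &UnsafeCell<usize>) -> bool;
    unsafe fn inc_weak(weak_count: &UnsafeCell<usize>);
    unsafe fn dec_weak(weak_count: &UnsafeCell<usize>) -> bool;
    unsafe fn downgrade(weak_count: &UnsafeCell<usize>);
}

// A single-threaded backend: every hook is a plain non-atomic update, and
// `downgrade` is nothing more than a weak increment.
enum DemoLocalOps {}

unsafe impl DemoRcOps for DemoLocalOps {
    unsafe fn inc_strong(strong_count: &UnsafeCell<usize>) {
        unsafe { *strong_count.get() += 1 };
    }

    unsafe fn dec_strong(strong_count: &UnsafeCell<usize>) -> bool {
        unsafe {
            *strong_count.get() -= 1;
            *strong_count.get() == 0
        }
    }

    unsafe fn inc_weak(weak_count: &UnsafeCell<usize>) {
        unsafe { *weak_count.get() += 1 };
    }

    unsafe fn dec_weak(weak_count: &UnsafeCell<usize>) -> bool {
        unsafe {
            *weak_count.get() -= 1;
            *weak_count.get() == 0
        }
    }

    unsafe fn downgrade(weak_count: &UnsafeCell<usize>) {
        unsafe { Self::inc_weak(weak_count) };
    }
}

fn main() {
    let strong = UnsafeCell::new(1_usize);
    let weak = UnsafeCell::new(1_usize); // Strong references collectively own one weak.
    unsafe {
        DemoLocalOps::downgrade(&weak);             // A Weak is created: weak = 2.
        assert!(!DemoLocalOps::dec_weak(&weak));    // Weak dropped: weak = 1, keep the allocation.
        assert!(DemoLocalOps::dec_strong(&strong)); // Last strong gone: drop the value.
    }
}

An atomic backend would implement the same hooks with `AtomicUsize` operations and give `downgrade` a locking-aware body, which is exactly what the `sync.rs` part of this commit does.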
@@ -356,26 +363,14 @@ where
         unsafe { Self::from_raw_parts(self.ptr, self.alloc.clone()) }
     }
 
-    unsafe fn clone_unchecked<R>(&self) -> Self
-    where
-        A: Clone,
-        R: RcOps,
-    {
-        unsafe {
-            R::inc_ref(self.weak_count_unchecked());
-
-            self.clone_without_inc_ref()
-        }
-    }
-
     pub unsafe fn clone<R>(&self) -> Self
     where
         A: Clone,
         R: RcOps,
     {
         unsafe {
             if !self.is_dangling() {
-                R::inc_ref(self.weak_count_unchecked());
+                R::inc_weak(self.weak_count_unchecked());
             }
 
             self.clone_without_inc_ref()
@@ -402,7 +397,7 @@ where
         R: RcOps,
     {
         unsafe {
-            if R::dec_ref(self.weak_count_unchecked()) {
+            if R::dec_weak(self.weak_count_unchecked()) {
                 self.deallocate();
             }
         };
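Note: these hunks simply repoint `RawWeak`'s clone and drop paths at the weak-specific hooks: cloning a non-dangling weak bumps the weak count, and dropping one frees the allocation once that count reaches zero, while a dangling weak (as produced by `Weak::new`) has no allocation and skips the bookkeeping. A small self-contained sketch of that pairing, with an invented `DemoCounts` block standing in for the real counter allocation:

use std::cell::Cell;

// Invented stand-in for the counter block behind a non-dangling Weak.
struct DemoCounts {
    weak: Cell<usize>,
}

// Mirrors the clone path: a non-dangling weak bumps the weak count (inc_weak);
// a dangling weak (None) has no allocation and nothing to count.
fn clone_weak(counts: Option<&DemoCounts>) {
    if let Some(counts) = counts {
        counts.weak.set(counts.weak.get() + 1);
    }
}

// Mirrors the drop path: dec_weak, then report whether the allocation should be
// freed because the last weak reference just went away.
fn drop_weak(counts: Option<&DemoCounts>) -> bool {
    match counts {
        Some(counts) => {
            counts.weak.set(counts.weak.get() - 1);
            counts.weak.get() == 0
        }
        None => false,
    }
}

fn main() {
    let counts = DemoCounts { weak: Cell::new(1) };
    clone_weak(Some(&counts));          // weak = 2
    assert!(!drop_weak(Some(&counts))); // weak = 1, keep the allocation
    assert!(drop_weak(Some(&counts)));  // weak = 0, deallocate now
    assert!(!drop_weak(None));          // dangling weak: nothing to do
}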
@@ -741,7 +736,7 @@ where
         R: RcOps,
     {
         unsafe {
-            R::inc_ref(self.strong_count());
+            R::inc_strong(self.strong_count());
 
             Self::from_weak(self.weak.clone_without_inc_ref())
         }
@@ -762,7 +757,7 @@ where
     }
 
     pub unsafe fn increment_strong_count<R: RcOps>(ptr: NonNull<T>) {
-        unsafe { R::inc_ref(strong_count_ptr_from_value_ptr(ptr.cast()).as_ref()) };
+        unsafe { R::inc_strong(strong_count_ptr_from_value_ptr(ptr.cast()).as_ref()) };
     }
 
     #[inline(never)]
@@ -780,7 +775,7 @@ where
         R: RcOps,
     {
         unsafe {
-            if R::dec_ref(self.strong_count()) {
+            if R::dec_strong(self.strong_count()) {
                 self.drop_slow::<R>();
             }
         };
@@ -791,7 +786,11 @@ where
         A: Clone,
         R: RcOps,
     {
-        unsafe { self.weak.clone_unchecked::<R>() }
+        unsafe {
+            R::downgrade(self.weak_count());
+
+            self.weak.clone_without_inc_ref()
+        }
     }
 
     pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
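Note: this last hunk is the interface change that enables the fix. `RawRc::downgrade` used to clone its embedded weak via the now-removed `clone_unchecked`, i.e. a bare weak-count increment, which is precisely what an `Arc`-style backend must not do while the weak count is locked. It now calls `R::downgrade` on the weak count first and only then builds the handle without touching the count again (`clone_without_inc_ref`). A hypothetical sketch of that dispatch, with a closure standing in for the `R::downgrade` hook and invented `DemoCounts`/`DemoWeak` types:

use std::cell::Cell;

// Invented stand-ins for the shared counter block and a Weak handle over it.
struct DemoCounts {
    weak: Cell<usize>,
}

struct DemoWeak<'a> {
    counts: &'a DemoCounts,
}

// Mirrors the new RawRc::downgrade shape: let the backend hook decide how the
// weak count is bumped, then construct the handle without a second increment.
fn downgrade<'a>(counts: &'a DemoCounts, backend_downgrade: impl Fn(&Cell<usize>)) -> DemoWeak<'a> {
    backend_downgrade(&counts.weak); // R::downgrade(self.weak_count())
    DemoWeak { counts }              // self.weak.clone_without_inc_ref()
}

fn main() {
    let counts = DemoCounts { weak: Cell::new(1) };
    // A single-threaded backend would pass a plain increment; an atomic backend
    // would pass a lock-aware routine instead.
    let w = downgrade(&counts, |weak| weak.set(weak.get() + 1));
    assert_eq!(w.counts.weak.get(), 2);
}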

library/alloc/src/rc.rs

Lines changed: 42 additions & 22 deletions
@@ -279,37 +279,53 @@ where
     move |raw_weak: &RawWeak<T, A>| f(unsafe { mem::transmute(raw_weak) })
 }
 
-enum RcOps {}
+unsafe fn inc_ref(count: &UnsafeCell<usize>) {
+    let count = unsafe { &mut *count.get() };
+    let strong = *count;
 
-unsafe impl raw_rc::RcOps for RcOps {
-    unsafe fn inc_ref(count: &UnsafeCell<usize>) {
-        let count = unsafe { &mut *count.get() };
-        let strong = *count;
+    // We insert an `assume` here to hint LLVM at an otherwise
+    // missed optimization.
+    // SAFETY: The reference count will never be zero when this is
+    // called.
+    unsafe { hint::assert_unchecked(strong != 0) };
+
+    let strong = count.wrapping_add(1);
+
+    *count = strong;
 
-        // We insert an `assume` here to hint LLVM at an otherwise
-        // missed optimization.
-        // SAFETY: The reference count will never be zero when this is
-        // called.
-        unsafe { hint::assert_unchecked(strong != 0) };
+    // We want to abort on overflow instead of dropping the value.
+    // Checking for overflow after the store instead of before
+    // allows for slightly better code generation.
+    if intrinsics::unlikely(strong == 0) {
+        intrinsics::abort();
+    }
+}
 
-        let strong = count.wrapping_add(1);
+unsafe fn dec_ref(count: &UnsafeCell<usize>) -> bool {
+    let count = unsafe { &mut *count.get() };
 
-        *count = strong;
+    *count -= 1;
 
-        // We want to abort on overflow instead of dropping the value.
-        // Checking for overflow after the store instead of before
-        // allows for slightly better code generation.
-        if intrinsics::unlikely(strong == 0) {
-            intrinsics::abort();
-        }
+    *count == 0
+}
+
+enum RcOps {}
+
+unsafe impl raw_rc::RcOps for RcOps {
+    unsafe fn inc_strong(strong_count: &UnsafeCell<usize>) {
+        unsafe { inc_ref(strong_count) };
     }
 
-    unsafe fn dec_ref(count: &UnsafeCell<usize>) -> bool {
-        let count = unsafe { &mut *count.get() };
+    unsafe fn dec_strong(strong_count: &UnsafeCell<usize>) -> bool {
+        unsafe { dec_ref(strong_count) }
+    }
 
-        *count -= 1;
+    unsafe fn inc_weak(weak_count: &UnsafeCell<usize>) {
+        unsafe { inc_ref(weak_count) };
+    }
 
-        *count == 0
+    unsafe fn dec_weak(weak_count: &UnsafeCell<usize>) -> bool {
+        unsafe { dec_ref(weak_count) }
     }
 
     unsafe fn upgrade(strong_count: &UnsafeCell<usize>) -> bool {
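Note: for the single-threaded `Rc` backend the change is mostly mechanical: the old method bodies become the free functions `inc_ref`/`dec_ref`, the four strong/weak hooks forward to them, and (as the next hunk shows) `downgrade` is simply another weak increment, since nothing can lock the weak count concurrently. A standalone sketch of that non-atomic increment/decrement pair, using a safe `Cell<usize>` and `std::process::abort` in place of the library's `UnsafeCell` and `intrinsics::abort`:

use std::cell::Cell;
use std::process;

// Sketch of the Rc-style increment: do the wrapping add first, then abort if
// the count wrapped to zero (overflow), mirroring the comment in the diff that
// checking after the store gives slightly better code generation.
fn inc_ref(count: &Cell<usize>) {
    debug_assert!(count.get() != 0, "a live reference implies a non-zero count");
    let new = count.get().wrapping_add(1);
    count.set(new);
    if new == 0 {
        process::abort(); // stands in for intrinsics::abort()
    }
}

// Sketch of the matching decrement: report when the last reference is gone.
fn dec_ref(count: &Cell<usize>) -> bool {
    count.set(count.get() - 1);
    count.get() == 0
}

fn main() {
    let weak = Cell::new(1);
    inc_ref(&weak);           // downgrade for Rc is exactly this
    assert!(!dec_ref(&weak));
    assert!(dec_ref(&weak));
}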
@@ -324,6 +340,10 @@ unsafe impl raw_rc::RcOps for RcOps {
         }
     }
 
+    unsafe fn downgrade(weak_count: &UnsafeCell<usize>) {
+        unsafe { inc_ref(weak_count) };
+    }
+
     unsafe fn lock_strong_count(strong_count: &UnsafeCell<usize>) -> bool {
         let strong_count = unsafe { &mut *strong_count.get() };
 
library/alloc/src/sync.rs

Lines changed: 92 additions & 43 deletions
@@ -24,7 +24,7 @@ use core::pin::{Pin, PinCoerceUnsized};
 use core::ptr::{self, NonNull};
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 use core::sync::atomic::{self, AtomicUsize};
-use core::{borrow, fmt, intrinsics};
+use core::{borrow, fmt, hint, intrinsics};
 
 #[cfg(not(no_global_oom_handling))]
 use crate::alloc::Layout;
@@ -80,55 +80,71 @@ macro_rules! acquire {
     };
 }
 
-enum RcOps {}
+unsafe fn inc_ref(count: &UnsafeCell<usize>) {
+    let count = unsafe { AtomicUsize::from_ptr(count.get()) };
+
+    // Using a relaxed ordering is alright here, as knowledge of the
+    // original reference prevents other threads from erroneously deleting
+    // the object.
+    //
+    // As explained in the [Boost documentation][1], Increasing the
+    // reference counter can always be done with memory_order_relaxed: New
+    // references to an object can only be formed from an existing
+    // reference, and passing an existing reference from one thread to
+    // another must already provide any required synchronization.
+    //
+    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+    let old_size = count.fetch_add(1, Relaxed);
+
+    // However we need to guard against massive refcounts in case someone is `mem::forget`ing
+    // Arcs. If we don't do this the count can overflow and users will use-after free. This
+    // branch will never be taken in any realistic program. We abort because such a program is
+    // incredibly degenerate, and we don't care to support it.
+    //
+    // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
+    // But we do that check *after* having done the increment, so there is a chance here that
+    // the worst already happened and we actually do overflow the `usize` counter. However, that
+    // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
+    // above and the `abort` below, which seems exceedingly unlikely.
+    //
+    // This is a global invariant, and also applies when using a compare-exchange loop to increment
+    // counters in other methods.
+    // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
+    // and then overflow using a few `fetch_add`s.
+    if old_size > MAX_REFCOUNT {
+        intrinsics::abort();
+    }
+}
 
-unsafe impl raw_rc::RcOps for RcOps {
-    unsafe fn inc_ref(count: &UnsafeCell<usize>) {
-        let count = unsafe { AtomicUsize::from_ptr(count.get()) };
+unsafe fn dec_ref(count: &UnsafeCell<usize>) -> bool {
+    let count = unsafe { AtomicUsize::from_ptr(count.get()) };
 
-        // Using a relaxed ordering is alright here, as knowledge of the
-        // original reference prevents other threads from erroneously deleting
-        // the object.
-        //
-        // As explained in the [Boost documentation][1], Increasing the
-        // reference counter can always be done with memory_order_relaxed: New
-        // references to an object can only be formed from an existing
-        // reference, and passing an existing reference from one thread to
-        // another must already provide any required synchronization.
-        //
-        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-        let old_size = count.fetch_add(1, Relaxed);
+    if count.fetch_sub(1, Release) == 1 {
+        acquire!(count);
 
-        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
-        // Arcs. If we don't do this the count can overflow and users will use-after free. This
-        // branch will never be taken in any realistic program. We abort because such a program is
-        // incredibly degenerate, and we don't care to support it.
-        //
-        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
-        // But we do that check *after* having done the increment, so there is a chance here that
-        // the worst already happened and we actually do overflow the `usize` counter. However, that
-        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
-        // above and the `abort` below, which seems exceedingly unlikely.
-        //
-        // This is a global invariant, and also applies when using a compare-exchange loop to increment
-        // counters in other methods.
-        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
-        // and then overflow using a few `fetch_add`s.
-        if old_size > MAX_REFCOUNT {
-            intrinsics::abort();
-        }
+        true
+    } else {
+        false
     }
+}
 
-    unsafe fn dec_ref(count: &UnsafeCell<usize>) -> bool {
-        let count = unsafe { AtomicUsize::from_ptr(count.get()) };
+enum RcOps {}
 
-        if count.fetch_sub(1, Release) == 1 {
-            acquire!(count);
+unsafe impl raw_rc::RcOps for RcOps {
+    unsafe fn inc_strong(strong_count: &UnsafeCell<usize>) {
+        unsafe { inc_ref(strong_count) };
+    }
 
-            true
-        } else {
-            false
-        }
+    unsafe fn dec_strong(strong_count: &UnsafeCell<usize>) -> bool {
+        unsafe { dec_ref(strong_count) }
+    }
+
+    unsafe fn inc_weak(weak_count: &UnsafeCell<usize>) {
+        unsafe { inc_ref(weak_count) };
+    }
+
+    unsafe fn dec_weak(weak_count: &UnsafeCell<usize>) -> bool {
+        unsafe { dec_ref(weak_count) }
     }
 
     unsafe fn upgrade(strong_count: &UnsafeCell<usize>) -> bool {
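Note: the `Arc` backend gets the same restructuring: the atomic bodies move into free `inc_ref`/`dec_ref` functions and the strong/weak hooks forward to them. The increment is a `Relaxed` `fetch_add` guarded by an abort once the previous value exceeds `MAX_REFCOUNT`; the decrement is a `Release` `fetch_sub` followed by an `Acquire` fence when it releases the last reference. A self-contained sketch of that pair against a bare `AtomicUsize`, with `MAX_REFCOUNT` written out as `isize::MAX as usize`:

use std::process;
use std::sync::atomic::{fence, AtomicUsize, Ordering::{Acquire, Relaxed, Release}};

const MAX_REFCOUNT: usize = isize::MAX as usize;

// Sketch of the shared atomic increment: Relaxed is enough because holding a
// reference already proves the object is alive; the guard aborts long before
// the counter can wrap.
fn inc_ref(count: &AtomicUsize) {
    if count.fetch_add(1, Relaxed) > MAX_REFCOUNT {
        process::abort();
    }
}

// Sketch of the shared atomic decrement: Release on the way down, plus an
// Acquire fence once we are the last reference, so the destructor sees all
// prior writes from other threads.
fn dec_ref(count: &AtomicUsize) -> bool {
    if count.fetch_sub(1, Release) == 1 {
        fence(Acquire);
        true
    } else {
        false
    }
}

fn main() {
    let count = AtomicUsize::new(1);
    inc_ref(&count);
    assert!(!dec_ref(&count));
    assert!(dec_ref(&count));
}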
@@ -156,6 +172,39 @@ unsafe impl raw_rc::RcOps for RcOps {
         strong_count.fetch_update(Acquire, Relaxed, checked_increment).is_ok()
     }
 
+    unsafe fn downgrade(weak_count: &UnsafeCell<usize>) {
+        let weak_count = unsafe { AtomicUsize::from_ptr(weak_count.get()) };
+
+        // This Relaxed is OK because we're checking the value in the CAS
+        // below.
+        let mut cur = weak_count.load(Relaxed);
+
+        loop {
+            // check if the weak counter is currently "locked"; if so, spin.
+            if cur == usize::MAX {
+                hint::spin_loop();
+                cur = weak_count.load(Relaxed);
+
+                continue;
+            }
+
+            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
+            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
+
+            // NOTE: this code currently ignores the possibility of overflow
+            // into usize::MAX; in general both Rc and Arc need to be adjusted
+            // to deal with overflow.
+
+            // Unlike with Clone(), we need this to be an Acquire read to
+            // synchronize with the write coming from `is_unique`, so that the
+            // events prior to that write happen before this read.
+            match weak_count.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
+                Ok(_) => break,
+                Err(old) => cur = old,
+            }
+        }
+    }
+
     unsafe fn lock_strong_count(strong_count: &UnsafeCell<usize>) -> bool {
         let strong_count = unsafe { AtomicUsize::from_ptr(strong_count.get()) };
 
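Note: this final hunk is the one the commit message refers to. `Arc` temporarily "locks" the weak count by storing `usize::MAX` while it examines both counters (for example in `is_unique`, which backs `get_mut`), so `downgrade` cannot blindly increment: it spins until the sentinel clears, re-checks the bound, and installs `cur + 1` with an `Acquire` compare-exchange so that everything written before the unlocking store is visible afterwards. A standalone sketch of that loop against a bare `AtomicUsize` (constant names mirror the diff; the library's `INTERNAL_OVERFLOW_ERROR` message is replaced by a plain string):

use std::hint;
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Relaxed}};

const MAX_REFCOUNT: usize = isize::MAX as usize;
const LOCKED: usize = usize::MAX; // sentinel stored while the weak count is locked

// Sketch of the downgrade loop: spin while the weak count is locked, then CAS
// in the increment with Acquire so it synchronizes with the unlocking store.
fn downgrade(weak_count: &AtomicUsize) {
    let mut cur = weak_count.load(Relaxed);
    loop {
        if cur == LOCKED {
            hint::spin_loop();
            cur = weak_count.load(Relaxed);
            continue;
        }

        assert!(cur <= MAX_REFCOUNT, "weak count overflow");

        match weak_count.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
            Ok(_) => break,
            Err(old) => cur = old,
        }
    }
}

fn main() {
    let weak = AtomicUsize::new(1);
    downgrade(&weak);
    assert_eq!(weak.load(Relaxed), 2);
}

The earlier `raw_rc`-based code reached this point through a bare unchecked increment, which ignored the lock entirely; routing `RawRc::downgrade` through `RcOps::downgrade` is what lets this loop run for `Arc` while `Rc` keeps its plain increment.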