@@ -24,7 +24,7 @@ use core::pin::{Pin, PinCoerceUnsized};
 use core::ptr::{self, NonNull};
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 use core::sync::atomic::{self, AtomicUsize};
-use core::{borrow, fmt, intrinsics};
+use core::{borrow, fmt, hint, intrinsics};

 #[cfg(not(no_global_oom_handling))]
 use crate::alloc::Layout;
@@ -80,55 +80,71 @@ macro_rules! acquire {
     };
 }

-enum RcOps {}
+unsafe fn inc_ref(count: &UnsafeCell<usize>) {
+    let count = unsafe { AtomicUsize::from_ptr(count.get()) };
+
+    // Using a relaxed ordering is alright here, as knowledge of the
+    // original reference prevents other threads from erroneously deleting
+    // the object.
+    //
+    // As explained in the [Boost documentation][1], Increasing the
+    // reference counter can always be done with memory_order_relaxed: New
+    // references to an object can only be formed from an existing
+    // reference, and passing an existing reference from one thread to
+    // another must already provide any required synchronization.
+    //
+    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+    let old_size = count.fetch_add(1, Relaxed);
+
+    // However we need to guard against massive refcounts in case someone is `mem::forget`ing
+    // Arcs. If we don't do this the count can overflow and users will use-after free. This
+    // branch will never be taken in any realistic program. We abort because such a program is
+    // incredibly degenerate, and we don't care to support it.
+    //
+    // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
+    // But we do that check *after* having done the increment, so there is a chance here that
+    // the worst already happened and we actually do overflow the `usize` counter. However, that
+    // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
+    // above and the `abort` below, which seems exceedingly unlikely.
+    //
+    // This is a global invariant, and also applies when using a compare-exchange loop to increment
+    // counters in other methods.
+    // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
+    // and then overflow using a few `fetch_add`s.
+    if old_size > MAX_REFCOUNT {
+        intrinsics::abort();
+    }
+}

-unsafe impl raw_rc::RcOps for RcOps {
-    unsafe fn inc_ref(count: &UnsafeCell<usize>) {
-        let count = unsafe { AtomicUsize::from_ptr(count.get()) };
+unsafe fn dec_ref(count: &UnsafeCell<usize>) -> bool {
+    let count = unsafe { AtomicUsize::from_ptr(count.get()) };

-        // Using a relaxed ordering is alright here, as knowledge of the
-        // original reference prevents other threads from erroneously deleting
-        // the object.
-        //
-        // As explained in the [Boost documentation][1], Increasing the
-        // reference counter can always be done with memory_order_relaxed: New
-        // references to an object can only be formed from an existing
-        // reference, and passing an existing reference from one thread to
-        // another must already provide any required synchronization.
-        //
-        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-        let old_size = count.fetch_add(1, Relaxed);
+    if count.fetch_sub(1, Release) == 1 {
+        acquire!(count);

-        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
-        // Arcs. If we don't do this the count can overflow and users will use-after free. This
-        // branch will never be taken in any realistic program. We abort because such a program is
-        // incredibly degenerate, and we don't care to support it.
-        //
-        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
-        // But we do that check *after* having done the increment, so there is a chance here that
-        // the worst already happened and we actually do overflow the `usize` counter. However, that
-        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
-        // above and the `abort` below, which seems exceedingly unlikely.
-        //
-        // This is a global invariant, and also applies when using a compare-exchange loop to increment
-        // counters in other methods.
-        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
-        // and then overflow using a few `fetch_add`s.
-        if old_size > MAX_REFCOUNT {
-            intrinsics::abort();
-        }
+        true
+    } else {
+        false
     }
+}

-    unsafe fn dec_ref(count: &UnsafeCell<usize>) -> bool {
-        let count = unsafe { AtomicUsize::from_ptr(count.get()) };
+enum RcOps {}

-        if count.fetch_sub(1, Release) == 1 {
-            acquire!(count);
+unsafe impl raw_rc::RcOps for RcOps {
+    unsafe fn inc_strong(strong_count: &UnsafeCell<usize>) {
+        unsafe { inc_ref(strong_count) };
+    }

-            true
-        } else {
-            false
-        }
+    unsafe fn dec_strong(strong_count: &UnsafeCell<usize>) -> bool {
+        unsafe { dec_ref(strong_count) }
+    }
+
+    unsafe fn inc_weak(weak_count: &UnsafeCell<usize>) {
+        unsafe { inc_ref(weak_count) };
+    }
+
+    unsafe fn dec_weak(weak_count: &UnsafeCell<usize>) -> bool {
+        unsafe { dec_ref(weak_count) }
     }

     unsafe fn upgrade(strong_count: &UnsafeCell<usize>) -> bool {
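The two free functions added above reuse the classic atomic refcount recipe: a Relaxed `fetch_add` guarded by a post-increment overflow check, and a Release `fetch_sub` whose last-reference path runs an Acquire fence (via `acquire!`) before destruction, with the `RcOps` impl now just delegating strong/weak operations to them. Below is a minimal, self-contained sketch of that recipe using only `std` atomics; `retain`, `release`, and the local `MAX_REFCOUNT` constant are illustrative stand-ins, not part of this change.

```rust
use std::process::abort;
use std::sync::atomic::{fence, AtomicUsize, Ordering::{Acquire, Relaxed, Release}};

// Hypothetical stand-in for the crate's `MAX_REFCOUNT` (`isize::MAX as usize`).
const MAX_REFCOUNT: usize = isize::MAX as usize;

/// Relaxed increment: an existing reference already keeps the object alive,
/// so no ordering is needed; only the post-increment overflow guard matters.
fn retain(count: &AtomicUsize) {
    if count.fetch_add(1, Relaxed) > MAX_REFCOUNT {
        abort(); // stands in for `intrinsics::abort()` in the diff
    }
}

/// Release decrement paired with an Acquire fence on the last reference,
/// mirroring the `fetch_sub(1, Release)` + `acquire!(count)` pattern above.
/// Returns true when the caller is now responsible for destroying the object.
fn release(count: &AtomicUsize) -> bool {
    if count.fetch_sub(1, Release) == 1 {
        // Make every write that happened before earlier `release` calls
        // visible here, before the payload is dropped.
        fence(Acquire);
        true
    } else {
        false
    }
}

fn main() {
    let count = AtomicUsize::new(1);
    retain(&count);
    assert!(!release(&count)); // another reference is still alive
    assert!(release(&count));  // last reference: caller would drop the payload
}
```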
@@ -156,6 +172,39 @@ unsafe impl raw_rc::RcOps for RcOps {
         strong_count.fetch_update(Acquire, Relaxed, checked_increment).is_ok()
     }

+    unsafe fn downgrade(weak_count: &UnsafeCell<usize>) {
+        let weak_count = unsafe { AtomicUsize::from_ptr(weak_count.get()) };
+
+        // This Relaxed is OK because we're checking the value in the CAS
+        // below.
+        let mut cur = weak_count.load(Relaxed);
+
+        loop {
+            // check if the weak counter is currently "locked"; if so, spin.
+            if cur == usize::MAX {
+                hint::spin_loop();
+                cur = weak_count.load(Relaxed);
+
+                continue;
+            }
+
+            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
+            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
+
+            // NOTE: this code currently ignores the possibility of overflow
+            // into usize::MAX; in general both Rc and Arc need to be adjusted
+            // to deal with overflow.
+
+            // Unlike with Clone(), we need this to be an Acquire read to
+            // synchronize with the write coming from `is_unique`, so that the
+            // events prior to that write happen before this read.
+            match weak_count.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
+                Ok(_) => break,
+                Err(old) => cur = old,
+            }
+        }
+    }
+
     unsafe fn lock_strong_count(strong_count: &UnsafeCell<usize>) -> bool {
         let strong_count = unsafe { AtomicUsize::from_ptr(strong_count.get()) };

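The new `downgrade` method spins while the weak count is held at the `usize::MAX` "locked" sentinel (taken by `is_unique`), then bumps it with a weak compare-exchange whose success ordering is Acquire. The sketch below restates that loop as a standalone function over a plain `AtomicUsize`; `LOCKED`, `MAX_REFCOUNT`, and `increment_weak` are hypothetical names used only for illustration.

```rust
use std::hint;
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Relaxed}};

// Hypothetical stand-ins for the crate's constants: `usize::MAX` marks the
// weak count as "locked", and `MAX_REFCOUNT` is `isize::MAX as usize`.
const LOCKED: usize = usize::MAX;
const MAX_REFCOUNT: usize = isize::MAX as usize;

/// CAS loop mirroring `downgrade` above: spin while the counter is locked,
/// guard against overflow, then try to bump it by one.
fn increment_weak(weak: &AtomicUsize) {
    let mut cur = weak.load(Relaxed); // Relaxed is fine; the CAS re-checks it.
    loop {
        if cur == LOCKED {
            // Another thread holds the "lock"; wait until a real count returns.
            hint::spin_loop();
            cur = weak.load(Relaxed);
            continue;
        }
        assert!(cur <= MAX_REFCOUNT, "weak count overflow");
        // Acquire on success synchronizes with the unlocking write, so work
        // done under the lock is visible once this new weak reference exists.
        match weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
            Ok(_) => return,
            Err(observed) => cur = observed,
        }
    }
}

fn main() {
    let weak = AtomicUsize::new(1);
    increment_weak(&weak);
    assert_eq!(weak.load(Relaxed), 2);
}
```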