@@ -47,18 +47,56 @@ static inline unsigned int refcount_read(const refcount_t *r)
 #ifdef CONFIG_REFCOUNT_FULL
 #include <linux/bug.h>

-#define REFCOUNT_MAX		(UINT_MAX - 1)
-#define REFCOUNT_SATURATED	UINT_MAX
+#define REFCOUNT_MAX		INT_MAX
+#define REFCOUNT_SATURATED	(INT_MIN / 2)

 /*
  * Variant of atomic_t specialized for reference counts.
  *
  * The interface matches the atomic_t interface (to aid in porting) but only
  * provides the few functions one should use for reference counting.
  *
- * It differs in that the counter saturates at REFCOUNT_SATURATED and will not
- * move once there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free issues.
+ * Saturation semantics
+ * ====================
+ *
+ * refcount_t differs from atomic_t in that the counter saturates at
+ * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
+ * counter and causing 'spurious' use-after-free issues. In order to avoid the
+ * cost associated with introducing cmpxchg() loops into all of the saturating
+ * operations, we temporarily allow the counter to take on an unchecked value
+ * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
+ * or overflow has occurred. Although this is racy when multiple threads
+ * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
+ * equidistant from 0 and INT_MAX we minimise the scope for error:
+ *
+ *                               INT_MAX     REFCOUNT_SATURATED   UINT_MAX
+ *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
+ *   +--------------------------------+----------------+----------------+
+ *                                     <---------- bad value! ---------->
+ *
+ * (in a signed view of the world, the "bad value" range corresponds to
+ * a negative counter value).
+ *
+ * As an example, consider a refcount_inc() operation that causes the counter
+ * to overflow:
+ *
+ *	int old = atomic_fetch_add_relaxed(1, &r->refs);
+ *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
+ *	if (old < 0)
+ *		atomic_set(&r->refs, REFCOUNT_SATURATED);
+ *
+ * If another thread also performs a refcount_inc() operation between the two
+ * atomic operations, then the count will continue to edge closer to 0. If it
+ * reaches a value of 1 before /any/ of the threads reset it to the saturated
+ * value, then a concurrent refcount_dec_and_test() may erroneously free the
+ * underlying object. Given the precise timing details involved with the
+ * round-robin scheduling of each thread manipulating the refcount and the need
+ * to hit the race multiple times in succession, there doesn't appear to be a
+ * practical avenue of attack even if using refcount_add() operations with
+ * larger increments.
+ *
+ * Memory ordering
+ * ===============
  *
  * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
  * and provide only what is strictly required for refcounts.
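
To make the saturation window in the comment above concrete, the following is a small userspace sketch, not part of the patch: it stands C11 atomics in for the kernel's atomic_t, and it re-reads the counter instead of evaluating `old + 1` directly, because the kernel relies on being built with -fno-strict-overflow for that expression to be well defined. It shows one increment past INT_MAX landing in the negative "bad value" range and then being clamped to REFCOUNT_SATURATED.

/* Illustrative userspace model only; not kernel code. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#define REFCOUNT_SATURATED	(INT_MIN / 2)	/* 0xc000_0000 when viewed unsigned */

int main(void)
{
	atomic_int refs = INT_MAX;	/* counter already at its legal maximum */

	/* Models refcount_inc(): unconditional fetch-add, clamp afterwards. */
	int old = atomic_fetch_add_explicit(&refs, 1, memory_order_relaxed);
	int now = atomic_load_explicit(&refs, memory_order_relaxed);

	/* old == INT_MAX, now == INT_MIN (0x8000_0000): a negative, "bad" value. */
	if (old < 0 || now < 0)
		atomic_store_explicit(&refs, REFCOUNT_SATURATED,
				      memory_order_relaxed);

	printf("clamped to 0x%08x\n",
	       (unsigned int)atomic_load_explicit(&refs, memory_order_relaxed));
	/* Prints 0xc0000000, roughly 2^30 away from both 0 and INT_MAX. */
	return 0;
}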
@@ -109,25 +147,19 @@ static inline unsigned int refcount_read(const refcount_t *r)
  */
 static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
 {
-	unsigned int new, val = atomic_read(&r->refs);
+	int old = refcount_read(r);

 	do {
-		if (!val)
-			return false;
-
-		if (unlikely(val == REFCOUNT_SATURATED))
-			return true;
-
-		new = val + i;
-		if (new < val)
-			new = REFCOUNT_SATURATED;
+		if (!old)
+			break;
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

-	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
-
-	WARN_ONCE(new == REFCOUNT_SATURATED,
-		  "refcount_t: saturated; leaking memory.\n");
+	if (unlikely(old < 0 || old + i < 0)) {
+		refcount_set(r, REFCOUNT_SATURATED);
+		WARN_ONCE(1, "refcount_t: saturated; leaking memory.\n");
+	}

-	return true;
+	return old;
 }

 /**
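
In the rewritten function the cmpxchg() loop is only responsible for refusing to resurrect a counter that has already hit zero; saturation is detected and clamped after the fact, and `return old;` relies on any non-zero pre-add value converting to true. The sketch below is a hedged userspace model of those three outcomes, illustrative only: the helper name is local to the sketch and the wrap-safe cast stands in for the -fno-strict-overflow arithmetic the kernel gets for free.

/* Illustrative userspace model of refcount_add_not_zero(); not kernel code. */
#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

#define REFCOUNT_SATURATED	(INT_MIN / 2)

static bool add_not_zero(atomic_int *refs, int i)
{
	int old = atomic_load_explicit(refs, memory_order_relaxed);
	int new = 0;

	do {
		if (!old)
			break;		/* counter hit zero: refuse to resurrect */
		/* wrap-around add; the kernel relies on -fno-strict-overflow */
		new = (int)((unsigned int)old + (unsigned int)i);
	} while (!atomic_compare_exchange_weak_explicit(refs, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));

	/* Clamp if the pre- or post-add value fell in the "bad" (negative) range;
	 * the 'old &&' guard keeps the early break from triggering a clamp. */
	if (old < 0 || (old && new < 0))
		atomic_store_explicit(refs, REFCOUNT_SATURATED,
				      memory_order_relaxed);

	return old;			/* any non-zero pre-add value means success */
}

int main(void)
{
	atomic_int dead = 0, live = 2, full = INT_MAX;

	assert(!add_not_zero(&dead, 1) && dead == 0);	/* stays dead */
	assert(add_not_zero(&live, 1) && live == 3);	/* ordinary case */
	assert(add_not_zero(&full, 1) && full == REFCOUNT_SATURATED);
	return 0;
}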
@@ -148,7 +180,13 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
  */
 static inline void refcount_add(int i, refcount_t *r)
 {
-	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+	int old = atomic_fetch_add_relaxed(i, &r->refs);
+
+	WARN_ONCE(!old, "refcount_t: addition on 0; use-after-free.\n");
+	if (unlikely(old <= 0 || old + i <= 0)) {
+		refcount_set(r, REFCOUNT_SATURATED);
+		WARN_ONCE(old, "refcount_t: saturated; leaking memory.\n");
+	}
 }

 /**
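
The `old <= 0 || old + i <= 0` test above folds the interesting failure cases into a single branch: addition on a dead counter, a counter already sitting in the bad range, and an add that pushes the value out of the legal 1..INT_MAX window. A small illustrative check, with a hypothetical helper name and a wrap-safe cast standing in for the kernel's -fno-strict-overflow arithmetic:

/* Illustrative only: the cases refcount_add()'s saturation test catches. */
#include <assert.h>
#include <limits.h>
#include <stdbool.h>

static bool must_saturate(int old, int i)
{
	/* wrap-around sum, as the kernel's atomic fetch-add would produce */
	int sum = (int)((unsigned int)old + (unsigned int)i);

	return old <= 0 || sum <= 0;
}

int main(void)
{
	assert(must_saturate(0, 1));		/* addition on 0: use-after-free, clamp */
	assert(must_saturate(-7, 1));		/* counter already in the bad range */
	assert(must_saturate(INT_MAX - 2, 10));	/* this add overflows past INT_MAX */
	assert(!must_saturate(3, 1));		/* the common case: nothing to do */
	return 0;
}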
@@ -166,23 +204,7 @@ static inline void refcount_add(int i, refcount_t *r)
  */
 static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 {
-	unsigned int new, val = atomic_read(&r->refs);
-
-	do {
-		new = val + 1;
-
-		if (!val)
-			return false;
-
-		if (unlikely(!new))
-			return true;
-
-	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
-
-	WARN_ONCE(new == REFCOUNT_SATURATED,
-		  "refcount_t: saturated; leaking memory.\n");
-
-	return true;
+	return refcount_add_not_zero(1, r);
 }

 /**
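
refcount_inc_not_zero() is now just refcount_add_not_zero(1, r), but the caller-visible contract is unchanged. A hedged usage sketch of the pattern the boolean return exists for, taking a reference during a lookup that can race with the final put; struct foo and the two helpers are hypothetical, while refcount_inc_not_zero(), refcount_dec_and_test() and kfree() are the real kernel APIs:

/* Hypothetical example; struct foo and these helpers are not from the patch. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;
	/* ... payload ... */
};

static struct foo *foo_get(struct foo *f)
{
	/*
	 * The object may already be on its way out: refcount_inc_not_zero()
	 * refuses to bump a counter that has reached zero, so a racing
	 * lookup cannot resurrect a dying object.
	 */
	if (f && refcount_inc_not_zero(&f->refs))
		return f;
	return NULL;
}

static void foo_put(struct foo *f)
{
	/* Drop our reference; only the last put frees the object. */
	if (refcount_dec_and_test(&f->refs))
		kfree(f);
}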
@@ -199,7 +221,7 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
  */
 static inline void refcount_inc(refcount_t *r)
 {
-	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+	refcount_add(1, r);
 }

 /**
@@ -224,26 +246,19 @@ static inline void refcount_inc(refcount_t *r)
  */
 static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
 {
-	unsigned int new, val = atomic_read(&r->refs);
-
-	do {
-		if (unlikely(val == REFCOUNT_SATURATED))
-			return false;
+	int old = atomic_fetch_sub_release(i, &r->refs);

-		new = val - i;
-		if (new > val) {
-			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
-			return false;
-		}
-
-	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
-
-	if (!new) {
+	if (old == i) {
 		smp_acquire__after_ctrl_dep();
 		return true;
 	}
-	return false;

+	if (unlikely(old < 0 || old - i < 0)) {
+		refcount_set(r, REFCOUNT_SATURATED);
+		WARN_ONCE(1, "refcount_t: underflow; use-after-free.\n");
+	}
+
+	return false;
 }

 /**
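
Here `old == i` identifies the final drop: atomic_fetch_sub_release() returns the pre-subtraction value, which equals i exactly when the counter reaches zero. The release ordering on the subtraction pairs with smp_acquire__after_ctrl_dep() in that branch, so the thread that ends up freeing the object observes everything other threads wrote before their final put. A hedged sketch of what that buys the caller; struct session and the two functions are hypothetical, while refcount_dec_and_test() (which is built on refcount_sub_and_test(1, r)), pr_info() and kfree() are real:

/* Hypothetical example; struct session and these helpers are illustrative. */
#include <linux/printk.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct session {
	refcount_t refs;
	int bytes_sent;		/* written by the I/O side before its final put */
};

/* Called by the I/O side when it is done with the session. */
static void session_io_done(struct session *s, int bytes_sent)
{
	s->bytes_sent = bytes_sent;	/* A: ordinary store */

	/*
	 * The decrement uses release ordering, so store A cannot be reordered
	 * after it. Whichever holder performs the final decrement takes the
	 * acquire side via smp_acquire__after_ctrl_dep() and is therefore
	 * guaranteed to see A before tearing the session down.
	 */
	if (refcount_dec_and_test(&s->refs)) {
		pr_info("session done, %d bytes sent\n", s->bytes_sent);
		kfree(s);
	}
}

/* Any other holder dropping its reference; it may end up doing the free. */
static void session_put(struct session *s)
{
	if (refcount_dec_and_test(&s->refs)) {
		/* Guaranteed to observe bytes_sent from session_io_done(). */
		pr_info("session done, %d bytes sent\n", s->bytes_sent);
		kfree(s);
	}
}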
@@ -276,9 +291,13 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
  */
 static inline void refcount_dec(refcount_t *r)
 {
-	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
-}
+	int old = atomic_fetch_sub_release(1, &r->refs);

+	if (unlikely(old <= 1)) {
+		refcount_set(r, REFCOUNT_SATURATED);
+		WARN_ONCE(1, "refcount_t: decrement hit 0; leaking memory.\n");
+	}
+}
 #else /* CONFIG_REFCOUNT_FULL */

 #define REFCOUNT_MAX		INT_MAX