@@ -45,22 +45,241 @@ static inline unsigned int refcount_read(const refcount_t *r)
 }
 
 #ifdef CONFIG_REFCOUNT_FULL
+#include <linux/bug.h>
 
 #define REFCOUNT_MAX		(UINT_MAX - 1)
 #define REFCOUNT_SATURATED	UINT_MAX
 
-extern __must_check bool refcount_add_not_zero(int i, refcount_t *r);
-extern void refcount_add(int i, refcount_t *r);
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at REFCOUNT_SATURATED and will not
+ * move once there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * it's the lock acquire; for RCU/lockless data structures it's the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc; this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads
+ * and stores will be issued before; they also provide a control dependency,
+ * which will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ * The decrements dec_and_test() and sub_and_test() also provide acquire
+ * ordering on success.
+ *
+ */
+
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
+static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
+{
+	unsigned int new, val = atomic_read(&r->refs);
+
+	do {
+		if (!val)
+			return false;
+
+		if (unlikely(val == REFCOUNT_SATURATED))
+			return true;
+
+		new = val + i;
+		if (new < val)
+			new = REFCOUNT_SATURATED;
+
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
+
+	WARN_ONCE(new == REFCOUNT_SATURATED,
+		  "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
+static inline void refcount_add(int i, refcount_t *r)
+{
+	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+}
+
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
+ * and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
+ */
+static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
+{
+	unsigned int new, val = atomic_read(&r->refs);
+
+	do {
+		new = val + 1;
 
-extern __must_check bool refcount_inc_not_zero(refcount_t *r);
-extern void refcount_inc(refcount_t *r);
+		if (!val)
+			return false;
 
-extern __must_check bool refcount_sub_and_test(int i, refcount_t *r);
+		if (unlikely(!new))
+			return true;
 
-extern __must_check bool refcount_dec_and_test(refcount_t *r);
-extern void refcount_dec(refcount_t *r);
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
+
+	WARN_ONCE(new == REFCOUNT_SATURATED,
+		  "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller already has a
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
+static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
+{
+	unsigned int new, val = atomic_read(&r->refs);
+
+	do {
+		if (unlikely(val == REFCOUNT_SATURATED))
+			return false;
+
+		new = val - i;
+		if (new > val) {
+			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
+			return false;
+		}
+
+	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
+
+	if (!new) {
+		smp_acquire__after_ctrl_dep();
+		return true;
+	}
+	return false;
+
+}
+
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
+static inline __must_check bool refcount_dec_and_test(refcount_t *r)
+{
+	return refcount_sub_and_test(1, r);
+}
+
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
+static inline void refcount_dec(refcount_t *r)
+{
+	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+}
 
-#else
+#else /* CONFIG_REFCOUNT_FULL */
 
 #define REFCOUNT_MAX		INT_MAX
 #define REFCOUNT_SATURATED	(INT_MIN / 2)
@@ -103,7 +322,7 @@ static inline void refcount_dec(refcount_t *r)
 	atomic_dec(&r->refs);
 }
 #endif /* !CONFIG_ARCH_HAS_REFCOUNT */
-#endif /* CONFIG_REFCOUNT_FULL */
+#endif /* !CONFIG_REFCOUNT_FULL */
 
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
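The kernel-doc above steers the normal take/release-one user toward refcount_inc() and refcount_dec_and_test(). As a minimal sketch of that pattern (struct foo, foo_alloc(), foo_get() and foo_put() are hypothetical names, not part of this patch; refcount_set() is defined elsewhere in the same header):

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;
	/* ... payload ... */
};

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (f)
		refcount_set(&f->refs, 1);	/* caller owns the initial reference */
	return f;
}

static struct foo *foo_get(struct foo *f)
{
	refcount_inc(&f->refs);	/* caller must already hold a reference */
	return f;
}

static void foo_put(struct foo *f)
{
	/*
	 * Release ordering publishes our prior stores to *f before the count
	 * drops; acquire on the 1->0 transition orders the kfree() after all
	 * other releases.
	 */
	if (refcount_dec_and_test(&f->refs))
		kfree(f);
}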
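The control-dependency remark in the header comment is what makes the lockless lookup idiom safe: a reader can race with the final put, so only a successful refcount_inc_not_zero() licenses later stores to the object. A sketch under assumed names (foo_cache and foo_lookup() are illustrative only, reusing struct foo from above):

#include <linux/rcupdate.h>
#include <linux/refcount.h>

static struct foo __rcu *foo_cache;

static struct foo *foo_lookup(void)
{
	struct foo *f;

	rcu_read_lock();
	f = rcu_dereference(foo_cache);
	/*
	 * The count may already have hit 0; a failed increment means we must
	 * not touch the object. On success, the control dependency orders
	 * our future stores after the increment, so we never modify an
	 * object we did not actually get a reference on.
	 */
	if (f && !refcount_inc_not_zero(&f->refs))
		f = NULL;
	rcu_read_unlock();

	return f;	/* NULL, or a reference the caller must put */
}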