@@ -41,14 +41,15 @@ static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt)
 #define UNWIND_MAX_ENTRIES \
 	((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))
 
-/* Guards adding to and reading the list of callbacks */
+/* Guards adding to or removing from the list of callbacks */
 static DEFINE_MUTEX(callback_mutex);
 static LIST_HEAD(callbacks);
 
 #define RESERVED_BITS	(UNWIND_PENDING | UNWIND_USED)
 
 /* Zero'd bits are available for assigning callback users */
 static unsigned long unwind_mask = RESERVED_BITS;
+DEFINE_STATIC_SRCU(unwind_srcu);
 
 static inline bool unwind_pending(struct unwind_task_info *info)
 {
@@ -174,8 +175,9 @@ static void unwind_deferred_task_work(struct callback_head *head)
 
 	cookie = info->id.id;
 
-	guard(mutex)(&callback_mutex);
-	list_for_each_entry(work, &callbacks, list) {
+	guard(srcu)(&unwind_srcu);
+	list_for_each_entry_srcu(work, &callbacks, list,
+				 srcu_read_lock_held(&unwind_srcu)) {
 		if (test_bit(work->bit, &bits)) {
 			work->func(work, &trace, cookie);
 			if (info->cache)
@@ -213,7 +215,7 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
 {
 	struct unwind_task_info *info = &current->unwind_info;
 	unsigned long old, bits;
-	unsigned long bit = BIT(work->bit);
+	unsigned long bit;
 	int ret;
 
 	*cookie = 0;
@@ -230,6 +232,14 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
 	if (WARN_ON_ONCE(!CAN_USE_IN_NMI && in_nmi()))
 		return -EINVAL;
 
+	/* Do not allow cancelled works to request again */
+	bit = READ_ONCE(work->bit);
+	if (WARN_ON_ONCE(bit < 0))
+		return -EINVAL;
+
+	/* Only need the mask now */
+	bit = BIT(bit);
+
 	guard(irqsave)();
 
 	*cookie = get_cookie(info);
@@ -281,10 +291,15 @@ void unwind_deferred_cancel(struct unwind_work *work)
 		return;
 
 	guard(mutex)(&callback_mutex);
-	list_del(&work->list);
+	list_del_rcu(&work->list);
+
+	/* Do not allow any more requests and prevent callbacks */
+	work->bit = -1;
 
 	__clear_bit(bit, &unwind_mask);
 
+	synchronize_srcu(&unwind_srcu);
+
 	guard(rcu)();
 	/* Clear this bit from all threads */
 	for_each_process_thread(g, t) {
@@ -307,7 +322,7 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
 	work->bit = ffz(unwind_mask);
 	__set_bit(work->bit, &unwind_mask);
 
-	list_add(&work->list, &callbacks);
+	list_add_rcu(&work->list, &callbacks);
 	work->func = func;
 	return 0;
 }
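
Taken together, the hunks convert the callback list from "mutex on both sides" to "mutex for writers, SRCU for readers": unwind_deferred_init() and unwind_deferred_cancel() still serialize under callback_mutex, while unwind_deferred_task_work() now walks the list inside an SRCU read-side critical section. Below is a minimal, self-contained sketch of that pattern using the same kernel primitives; the demo_* names are hypothetical, and it uses explicit srcu_read_lock()/srcu_read_unlock() where the patch uses the guard(srcu)() helper.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/srcu.h>

struct demo_work {
	struct list_head list;
	void (*func)(struct demo_work *work);
};

/* Writers (register/unregister) serialize on a mutex... */
static DEFINE_MUTEX(demo_mutex);
static LIST_HEAD(demo_list);
/* ...while readers only enter an SRCU read-side critical section */
DEFINE_STATIC_SRCU(demo_srcu);

/* Reader: safe against a concurrent demo_unregister() */
static void demo_walk(void)
{
	struct demo_work *work;
	int idx;

	idx = srcu_read_lock(&demo_srcu);
	list_for_each_entry_srcu(work, &demo_list, list,
				 srcu_read_lock_held(&demo_srcu))
		work->func(work);
	srcu_read_unlock(&demo_srcu, idx);
}

/* Writer: publish a new entry; readers may see it immediately */
static void demo_register(struct demo_work *work)
{
	mutex_lock(&demo_mutex);
	list_add_rcu(&work->list, &demo_list);
	mutex_unlock(&demo_mutex);
}

/* Writer: unlink, then wait out every reader that might still see it */
static void demo_unregister(struct demo_work *work)
{
	mutex_lock(&demo_mutex);
	list_del_rcu(&work->list);
	mutex_unlock(&demo_mutex);

	/* After this returns, no CPU is still iterating over 'work' */
	synchronize_srcu(&demo_srcu);
}

The ordering on the unregister side is the point of the patch: list_del_rcu() makes the entry unreachable to new readers, and synchronize_srcu() waits for every reader that entered before the unlink, which is why unwind_deferred_cancel() can safely recycle the work's bit afterwards.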
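The other half of the change is the cancelled-work guard added to unwind_deferred_request(). A second sketch of that check, assuming a signed snapshot of the bit field; demo_request() and struct demo_req are hypothetical, not the patched code:

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/errno.h>

struct demo_req {
	int bit;	/* index into the users mask; -1 once cancelled */
};

static int demo_request(struct demo_req *work, unsigned long *mask)
{
	int bit = READ_ONCE(work->bit);	/* snapshot: a cancel may race */

	if (bit < 0)			/* cancelled work must not re-arm */
		return -EINVAL;

	*mask = BIT(bit);		/* callers only need the mask form */
	return 0;
}

READ_ONCE() pairs with the work->bit = -1 store in unwind_deferred_cancel(), so a request racing with a cancel either uses the still-valid bit or fails with -EINVAL rather than re-arming a cancelled work.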