@@ -49,14 +49,13 @@ int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
 
 	hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
 
-	rcu_read_lock();
-	e = io_napi_hash_find(hash_list, napi_id);
-	if (e) {
-		WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
-		rcu_read_unlock();
-		return -EEXIST;
+	scoped_guard(rcu) {
+		e = io_napi_hash_find(hash_list, napi_id);
+		if (e) {
+			WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
+			return -EEXIST;
+		}
 	}
-	rcu_read_unlock();
 
 	e = kmalloc(sizeof(*e), GFP_NOWAIT);
 	if (!e)
@@ -65,6 +64,10 @@ int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
 	e->napi_id = napi_id;
 	e->timeout = jiffies + NAPI_TIMEOUT;
 
+	/*
+	 * guard(spinlock) is not used here because the lock must be dropped
+	 * manually before calling kfree().
+	 */
 	spin_lock(&ctx->napi_lock);
 	if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
 		spin_unlock(&ctx->napi_lock);
@@ -82,7 +85,7 @@ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
 {
 	struct io_napi_entry *e;
 
-	spin_lock(&ctx->napi_lock);
+	guard(spinlock)(&ctx->napi_lock);
 	/*
 	 * list_for_each_entry_safe() is not required as long as:
 	 * 1. list_del_rcu() does not reset the deleted node next pointer
@@ -96,7 +99,6 @@ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
 			kfree_rcu(e, rcu);
 		}
 	}
-	spin_unlock(&ctx->napi_lock);
 }
 
 static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
@@ -168,11 +170,12 @@ static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
 	if (list_is_singular(&ctx->napi_list))
 		loop_end_arg = iowq;
 
-	rcu_read_lock();
-	do {
-		is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
-	} while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg);
-	rcu_read_unlock();
+	scoped_guard(rcu) {
+		do {
+			is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
+		} while (!io_napi_busy_loop_should_end(iowq, start_time) &&
+			 !loop_end_arg);
+	}
 
 	io_napi_remove_stale(ctx, is_stale);
 }
@@ -203,13 +206,12 @@ void io_napi_free(struct io_ring_ctx *ctx)
 {
 	struct io_napi_entry *e;
 
-	spin_lock(&ctx->napi_lock);
+	guard(spinlock)(&ctx->napi_lock);
 	list_for_each_entry(e, &ctx->napi_list, list) {
 		hash_del_rcu(&e->node);
 		kfree_rcu(e, rcu);
 	}
 	INIT_LIST_HEAD_RCU(&ctx->napi_list);
-	spin_unlock(&ctx->napi_lock);
 }
 
 /*
@@ -305,9 +307,9 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
 	if (list_empty_careful(&ctx->napi_list))
 		return 0;
 
-	rcu_read_lock();
-	is_stale = __io_napi_do_busy_loop(ctx, NULL);
-	rcu_read_unlock();
+	scoped_guard(rcu) {
+		is_stale = __io_napi_do_busy_loop(ctx, NULL);
+	}
 
 	io_napi_remove_stale(ctx, is_stale);
 	return 1;
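
Note on the primitives used above: guard() and scoped_guard() come from the kernel's <linux/cleanup.h> and are built on the compiler's __attribute__((cleanup)), which runs a release function automatically whenever the guard variable leaves scope, including through an early return. That is why the return -EEXIST inside scoped_guard(rcu) no longer needs a matching rcu_read_unlock(), and why __io_napi_add_id() keeps its explicit spin_lock()/spin_unlock() pair: on the duplicate path it has to drop the lock before kfree(e). Below is a minimal user-space sketch of the same mechanism using pthreads; the names (my_guard_t, my_guard_release, protected_increment) are hypothetical illustrations, not the kernel's implementation.

	#include <pthread.h>

	static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;
	static int counter;

	typedef pthread_mutex_t *my_guard_t;

	/* Invoked by the compiler whenever the guard variable goes out of scope. */
	static void my_guard_release(my_guard_t *g)
	{
		pthread_mutex_unlock(*g);
	}

	static int protected_increment(int limit)
	{
		/*
		 * Analogous to guard(spinlock)(&ctx->napi_lock): the unlock is
		 * scheduled for scope exit at the point the lock is taken.
		 */
		__attribute__((cleanup(my_guard_release))) my_guard_t g = &my_lock;
		pthread_mutex_lock(g);

		if (counter >= limit)
			return -1;	/* my_lock is released here */

		counter++;
		return 0;		/* ... and on this path as well */
	}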