@@ -81,19 +81,24 @@ void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock)
 	}
 
 	hlist_add_tail_rcu(&e->node, hash_list);
-	list_add_tail(&e->list, &ctx->napi_list);
+	list_add_tail_rcu(&e->list, &ctx->napi_list);
 	spin_unlock(&ctx->napi_lock);
 }
 
 static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
 {
 	struct io_napi_entry *e;
-	unsigned int i;
 
 	spin_lock(&ctx->napi_lock);
-	hash_for_each(ctx->napi_ht, i, e, node) {
+	/*
+	 * list_for_each_entry_safe() is not required as long as:
+	 * 1. list_del_rcu() does not reset the deleted node next pointer
+	 * 2. kfree_rcu() delays the memory freeing until the next quiescent
+	 *    state
+	 */
+	list_for_each_entry(e, &ctx->napi_list, list) {
 		if (time_after(jiffies, READ_ONCE(e->timeout))) {
-			list_del(&e->list);
+			list_del_rcu(&e->list);
 			hash_del_rcu(&e->node);
 			kfree_rcu(e, rcu);
 		}
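
The comment in the hunk above relies on two properties of the RCU list primitives: list_del_rcu() poisons only the ->prev pointer, so the removed entry's ->next still reaches its successor, and kfree_rcu() defers the actual free until after a grace period, so that successor pointer stays valid for the remainder of the walk. A minimal, self-contained kernel-style sketch of the same pattern is shown below; demo_entry, demo_list, demo_lock and demo_remove_stale() are illustrative names, not identifiers from io_uring.

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head list;
	unsigned long timeout;
	struct rcu_head rcu;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/* Prune expired entries without list_for_each_entry_safe(). */
static void demo_remove_stale(void)
{
	struct demo_entry *e;

	spin_lock(&demo_lock);
	list_for_each_entry(e, &demo_list, list) {
		if (time_after(jiffies, READ_ONCE(e->timeout))) {
			/* ->next is left intact, so the iterator can still advance */
			list_del_rcu(&e->list);
			/* memory is freed only after the next grace period */
			kfree_rcu(e, rcu);
		}
	}
	spin_unlock(&demo_lock);
}

If deletion used plain list_del() plus an immediate kfree(), the iterator would dereference freed memory when stepping past the removed entry, which is exactly what the _safe variant normally guards against.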
@@ -204,13 +209,13 @@ void io_napi_init(struct io_ring_ctx *ctx)
 void io_napi_free(struct io_ring_ctx *ctx)
 {
 	struct io_napi_entry *e;
-	unsigned int i;
 
 	spin_lock(&ctx->napi_lock);
-	hash_for_each(ctx->napi_ht, i, e, node) {
+	list_for_each_entry(e, &ctx->napi_list, list) {
 		hash_del_rcu(&e->node);
 		kfree_rcu(e, rcu);
 	}
+	INIT_LIST_HEAD_RCU(&ctx->napi_list);
 	spin_unlock(&ctx->napi_lock);
 }
 
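
The switch from plain list_add_tail()/list_del() to the _rcu variants, together with INIT_LIST_HEAD_RCU() when the list is torn down, suggests that some path walks ctx->napi_list without taking napi_lock. That reader is not part of the hunks shown here, so the following is only a sketch of the assumed lockless-traversal pattern, reusing the demo_entry/demo_list names from the sketch above:

/* Hypothetical lockless reader: safe against concurrent _rcu updates. */
static void demo_busy_loop(void)
{
	struct demo_entry *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_list, list) {
		/* act on e, e.g. busy-poll the NAPI instance it describes */
	}
	rcu_read_unlock();
}

Under this pattern the spinlock serializes writers against each other, while readers only need rcu_read_lock(), which is why every list mutation above has to go through the RCU-aware helpers.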