Skip to content

Commit db1e1ad

Browse files
Olivier Langlois authored and axboe committed
io_uring/napi: Use lock guards
Convert napi locks to use the shiny new Scope-Based Resource Management machinery.

Signed-off-by: Olivier Langlois <[email protected]>
Link: https://lore.kernel.org/r/2680ca47ee183cfdb89d1a40c84d349edeb620ab.1728828877.git.olivier@trillion01.com
Signed-off-by: Jens Axboe <[email protected]>
1 parent a5e26f4 commit db1e1ad

File tree

1 file changed

+21
-19
lines changed

1 file changed

+21
-19
lines changed

io_uring/napi.c

Lines changed: 21 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -49,14 +49,13 @@ int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
4949

5050
hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
5151

52-
rcu_read_lock();
53-
e = io_napi_hash_find(hash_list, napi_id);
54-
if (e) {
55-
WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
56-
rcu_read_unlock();
57-
return -EEXIST;
52+
scoped_guard(rcu) {
53+
e = io_napi_hash_find(hash_list, napi_id);
54+
if (e) {
55+
WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
56+
return -EEXIST;
57+
}
5858
}
59-
rcu_read_unlock();
6059

6160
e = kmalloc(sizeof(*e), GFP_NOWAIT);
6261
if (!e)
@@ -65,6 +64,10 @@ int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
6564
e->napi_id = napi_id;
6665
e->timeout = jiffies + NAPI_TIMEOUT;
6766

67+
/*
68+
* guard(spinlock) is not used to manually unlock it before calling
69+
* kfree()
70+
*/
6871
spin_lock(&ctx->napi_lock);
6972
if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
7073
spin_unlock(&ctx->napi_lock);
@@ -82,7 +85,7 @@ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
8285
{
8386
struct io_napi_entry *e;
8487

85-
spin_lock(&ctx->napi_lock);
88+
guard(spinlock)(&ctx->napi_lock);
8689
/*
8790
* list_for_each_entry_safe() is not required as long as:
8891
* 1. list_del_rcu() does not reset the deleted node next pointer
@@ -96,7 +99,6 @@ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
9699
kfree_rcu(e, rcu);
97100
}
98101
}
99-
spin_unlock(&ctx->napi_lock);
100102
}
101103

102104
static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
@@ -168,11 +170,12 @@ static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
168170
if (list_is_singular(&ctx->napi_list))
169171
loop_end_arg = iowq;
170172

171-
rcu_read_lock();
172-
do {
173-
is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
174-
} while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg);
175-
rcu_read_unlock();
173+
scoped_guard(rcu) {
174+
do {
175+
is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
176+
} while (!io_napi_busy_loop_should_end(iowq, start_time) &&
177+
!loop_end_arg);
178+
}
176179

177180
io_napi_remove_stale(ctx, is_stale);
178181
}
@@ -203,13 +206,12 @@ void io_napi_free(struct io_ring_ctx *ctx)
203206
{
204207
struct io_napi_entry *e;
205208

206-
spin_lock(&ctx->napi_lock);
209+
guard(spinlock)(&ctx->napi_lock);
207210
list_for_each_entry(e, &ctx->napi_list, list) {
208211
hash_del_rcu(&e->node);
209212
kfree_rcu(e, rcu);
210213
}
211214
INIT_LIST_HEAD_RCU(&ctx->napi_list);
212-
spin_unlock(&ctx->napi_lock);
213215
}
214216

215217
/*
@@ -305,9 +307,9 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
305307
if (list_empty_careful(&ctx->napi_list))
306308
return 0;
307309

308-
rcu_read_lock();
309-
is_stale = __io_napi_do_busy_loop(ctx, NULL);
310-
rcu_read_unlock();
310+
scoped_guard(rcu) {
311+
is_stale = __io_napi_do_busy_loop(ctx, NULL);
312+
}
311313

312314
io_napi_remove_stale(ctx, is_stale);
313315
return 1;

0 commit comments

Comments
 (0)