Skip to content

Commit 5ba113d

Browse files
axboe and gregkh
authored and committed
io_uring/msg_ring: kill alloc_cache for io_kiocb allocations
[ Upstream commit df8922a ] A recent commit: fc582cd ("io_uring/msg_ring: ensure io_kiocb freeing is deferred for RCU") fixed an issue with not deferring freeing of io_kiocb structs that msg_ring allocates to after the current RCU grace period. But this only covers requests that don't end up in the allocation cache. If a request goes into the alloc cache, it can get reused before it is sane to do so. A recent syzbot report would seem to indicate that there's something there, however it may very well just be because of the KASAN poisoning that the alloc_cache handles manually. Rather than attempt to make the alloc_cache sane for that use case, just drop the usage of the alloc_cache for msg_ring request payload data. Fixes: 50cf5f3 ("io_uring/msg_ring: add an alloc cache for io_kiocb entries") Link: https://lore.kernel.org/io-uring/[email protected]/ Reported-by: [email protected] Signed-off-by: Jens Axboe <[email protected]> Signed-off-by: Sasha Levin <[email protected]>
1 parent 045ee26 commit 5ba113d

File tree

3 files changed

+2
-29
lines changed

3 files changed

+2
-29
lines changed

include/linux/io_uring_types.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -418,9 +418,6 @@ struct io_ring_ctx {
418418
struct list_head defer_list;
419419
unsigned nr_drained;
420420

421-
struct io_alloc_cache msg_cache;
422-
spinlock_t msg_lock;
423-
424421
#ifdef CONFIG_NET_RX_BUSY_POLL
425422
struct list_head napi_list; /* track busy poll napi_id */
426423
spinlock_t napi_lock; /* napi_list lock */

io_uring/io_uring.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,6 @@ static void io_free_alloc_caches(struct io_ring_ctx *ctx)
290290
io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
291291
io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
292292
io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free);
293-
io_alloc_cache_free(&ctx->msg_cache, kfree);
294293
io_futex_cache_free(ctx);
295294
io_rsrc_cache_free(ctx);
296295
}
@@ -337,9 +336,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
337336
ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX,
338337
sizeof(struct io_async_cmd),
339338
sizeof(struct io_async_cmd));
340-
spin_lock_init(&ctx->msg_lock);
341-
ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
342-
sizeof(struct io_kiocb), 0);
343339
ret |= io_futex_cache_init(ctx);
344340
ret |= io_rsrc_cache_init(ctx);
345341
if (ret)

io_uring/msg_ring.c

Lines changed: 2 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
1111
#include "io_uring.h"
1212
#include "rsrc.h"
1313
#include "filetable.h"
14-
#include "alloc_cache.h"
1514
#include "msg_ring.h"
1615

1716
/* All valid masks for MSG_RING */
@@ -76,13 +75,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw)
7675
struct io_ring_ctx *ctx = req->ctx;
7776

7877
io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
79-
if (spin_trylock(&ctx->msg_lock)) {
80-
if (io_alloc_cache_put(&ctx->msg_cache, req))
81-
req = NULL;
82-
spin_unlock(&ctx->msg_lock);
83-
}
84-
if (req)
85-
kfree_rcu(req, rcu_head);
78+
kfree_rcu(req, rcu_head);
8679
percpu_ref_put(&ctx->refs);
8780
}
8881

@@ -104,26 +97,13 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
10497
return 0;
10598
}
10699

107-
static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
108-
{
109-
struct io_kiocb *req = NULL;
110-
111-
if (spin_trylock(&ctx->msg_lock)) {
112-
req = io_alloc_cache_get(&ctx->msg_cache);
113-
spin_unlock(&ctx->msg_lock);
114-
if (req)
115-
return req;
116-
}
117-
return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
118-
}
119-
120100
static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
121101
struct io_msg *msg)
122102
{
123103
struct io_kiocb *target;
124104
u32 flags = 0;
125105

126-
target = io_msg_get_kiocb(target_ctx);
106+
target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
127107
if (unlikely(!target))
128108
return -ENOMEM;
129109

0 commit comments

Comments
 (0)