Skip to content

Commit 010e8e6

Browse files
isilence authored and axboe committed
io_uring: de-unionise io_kiocb
As io_kiocb have enough space, move ->work out of a union. It's safer this way and removes ->work memcpy bouncing. By the way make tabulation in struct io_kiocb consistent. Signed-off-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent b089ed3 commit 010e8e6

File tree

1 file changed

+14
-45
lines changed

1 file changed

+14
-45
lines changed

fs/io_uring.c

Lines changed: 14 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -600,7 +600,6 @@ enum {
600600
struct async_poll {
601601
struct io_poll_iocb poll;
602602
struct io_poll_iocb *double_poll;
603-
struct io_wq_work work;
604603
};
605604

606605
/*
@@ -641,36 +640,26 @@ struct io_kiocb {
641640
u16 buf_index;
642641
u32 result;
643642

644-
struct io_ring_ctx *ctx;
645-
unsigned int flags;
646-
refcount_t refs;
647-
struct task_struct *task;
648-
u64 user_data;
643+
struct io_ring_ctx *ctx;
644+
unsigned int flags;
645+
refcount_t refs;
646+
struct task_struct *task;
647+
u64 user_data;
649648

650-
struct list_head link_list;
649+
struct list_head link_list;
651650

652651
/*
653652
* 1. used with ctx->iopoll_list with reads/writes
654653
* 2. to track reqs with ->files (see io_op_def::file_table)
655654
*/
656-
struct list_head inflight_entry;
657-
658-
struct percpu_ref *fixed_file_refs;
659-
660-
union {
661-
/*
662-
* Only commands that never go async can use the below fields,
663-
* obviously. Right now only IORING_OP_POLL_ADD uses them, and
664-
* async armed poll handlers for regular commands. The latter
665-
* restore the work, if needed.
666-
*/
667-
struct {
668-
struct hlist_node hash_node;
669-
struct async_poll *apoll;
670-
};
671-
struct io_wq_work work;
672-
};
673-
struct callback_head task_work;
655+
struct list_head inflight_entry;
656+
657+
struct percpu_ref *fixed_file_refs;
658+
struct callback_head task_work;
659+
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
660+
struct hlist_node hash_node;
661+
struct async_poll *apoll;
662+
struct io_wq_work work;
674663
};
675664

676665
struct io_defer_entry {
@@ -4668,10 +4657,6 @@ static void io_async_task_func(struct callback_head *cb)
46684657
io_poll_remove_double(req, apoll->double_poll);
46694658
spin_unlock_irq(&ctx->completion_lock);
46704659

4671-
/* restore ->work in case we need to retry again */
4672-
if (req->flags & REQ_F_WORK_INITIALIZED)
4673-
memcpy(&req->work, &apoll->work, sizeof(req->work));
4674-
46754660
if (!READ_ONCE(apoll->poll.canceled))
46764661
__io_req_task_submit(req);
46774662
else
@@ -4763,9 +4748,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
47634748
apoll->double_poll = NULL;
47644749

47654750
req->flags |= REQ_F_POLLED;
4766-
if (req->flags & REQ_F_WORK_INITIALIZED)
4767-
memcpy(&apoll->work, &req->work, sizeof(req->work));
4768-
47694751
io_get_req_task(req);
47704752
req->apoll = apoll;
47714753
INIT_HLIST_NODE(&req->hash_node);
@@ -4784,8 +4766,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
47844766
if (ret) {
47854767
io_poll_remove_double(req, apoll->double_poll);
47864768
spin_unlock_irq(&ctx->completion_lock);
4787-
if (req->flags & REQ_F_WORK_INITIALIZED)
4788-
memcpy(&req->work, &apoll->work, sizeof(req->work));
47894769
kfree(apoll->double_poll);
47904770
kfree(apoll);
47914771
return false;
@@ -4828,14 +4808,6 @@ static bool io_poll_remove_one(struct io_kiocb *req)
48284808
do_complete = __io_poll_remove_one(req, &apoll->poll);
48294809
if (do_complete) {
48304810
io_put_req(req);
4831-
/*
4832-
* restore ->work because we will call
4833-
* io_req_clean_work below when dropping the
4834-
* final reference.
4835-
*/
4836-
if (req->flags & REQ_F_WORK_INITIALIZED)
4837-
memcpy(&req->work, &apoll->work,
4838-
sizeof(req->work));
48394811
kfree(apoll->double_poll);
48404812
kfree(apoll);
48414813
}
@@ -4969,9 +4941,6 @@ static int io_poll_add(struct io_kiocb *req)
49694941
struct io_poll_table ipt;
49704942
__poll_t mask;
49714943

4972-
/* ->work is in union with hash_node and others */
4973-
io_req_clean_work(req);
4974-
49754944
INIT_HLIST_NODE(&req->hash_node);
49764945
ipt.pt._qproc = io_poll_queue_proc;
49774946

0 commit comments

Comments (0)