
Commit 0a78a37

Merge tag 'for-6.1/io_uring-2022-10-03' of git://git.kernel.dk/linux
Pull io_uring updates from Jens Axboe:

 - Add support for more directly managed task_work running.

   This is beneficial for real world applications that end up issuing
   lots of system calls as part of handling work. Normal task_work will
   always execute as we transition in and out of the kernel, even for
   "unrelated" system calls. It's more efficient to defer the handling
   of io_uring's deferred work until the application wants it to be run,
   generally in batches.

   As part of ongoing work to write an io_uring network backend for
   Thrift, this has been shown to greatly improve performance. (Dylan)

 - Add IOPOLL support for passthrough (Kanchan)

 - Improvements and fixes to the send zero-copy support (Pavel)

 - Partial IO handling fixes (Pavel)

 - CQE ordering fixes around CQ ring overflow (Pavel)

 - Support sendto() for non-zc as well (Pavel)

 - Support sendmsg for zerocopy (Pavel)

 - Networking iov_iter fix (Stefan)

 - Misc fixes and cleanups (Pavel, me)

* tag 'for-6.1/io_uring-2022-10-03' of git://git.kernel.dk/linux: (56 commits)
  io_uring/net: fix notif cqe reordering
  io_uring/net: don't update msg_name if not provided
  io_uring: don't gate task_work run on TIF_NOTIFY_SIGNAL
  io_uring/rw: defer fsnotify calls to task context
  io_uring/net: fix fast_iov assignment in io_setup_async_msg()
  io_uring/net: fix non-zc send with address
  io_uring/net: don't skip notifs for failed requests
  io_uring/rw: don't lose short results on io_setup_async_rw()
  io_uring/rw: fix unexpected link breakage
  io_uring/net: fix cleanup double free free_iov init
  io_uring: fix CQE reordering
  io_uring/net: fix UAF in io_sendrecv_fail()
  selftest/net: adjust io_uring sendzc notif handling
  io_uring: ensure local task_work marks task as running
  io_uring/net: zerocopy sendmsg
  io_uring/net: combine fail handlers
  io_uring/net: rename io_sendzc()
  io_uring/net: support non-zerocopy sendto
  io_uring/net: refactor io_setup_async_addr
  io_uring/net: don't lose partial send_zc on fail
  ...
2 parents 188943a + 108893d commit 0a78a37
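
The headline item is deferred task_work. Below is a minimal userspace sketch of opting in, assuming a liburing new enough to define IORING_SETUP_DEFER_TASKRUN (the kernel only accepts it together with IORING_SETUP_SINGLE_ISSUER); this is illustrative, not code from the series itself:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_params p = { };
    int ret;

    /* DEFER_TASKRUN is only accepted together with SINGLE_ISSUER */
    p.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
    ret = io_uring_queue_init_params(64, &ring, &p);
    if (ret < 0) {
        /* older kernels reject unknown setup flags */
        fprintf(stderr, "queue_init: %d\n", ret);
        return 1;
    }

    /*
     * io_uring's deferred work now runs only when this task waits
     * for completions (e.g. io_uring_wait_cqe()), not on every
     * kernel entry/exit, so unrelated system calls stay cheap.
     */
    io_uring_queue_exit(&ring);
    return 0;
}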

File tree

30 files changed: +859, -326 lines


block/blk-mq.c

Lines changed: 2 additions & 1 deletion

@@ -1233,7 +1233,7 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
 	complete(&wait->done);
 }
 
-static bool blk_rq_is_poll(struct request *rq)
+bool blk_rq_is_poll(struct request *rq)
 {
 	if (!rq->mq_hctx)
 		return false;
@@ -1243,6 +1243,7 @@ static bool blk_rq_is_poll(struct request *rq)
 		return false;
 	return true;
 }
+EXPORT_SYMBOL_GPL(blk_rq_is_poll);
 
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
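
Exporting blk_rq_is_poll() lets code outside the block core ask whether a request was submitted for polled completion and pick a completion path accordingly. A sketch of the intended call pattern; the helper names below are hypothetical, and the real user is the nvme change further down:

/* Hypothetical driver completion hook: finish polled requests inline,
 * push everything else to task context (cf. nvme_uring_cmd_end_io). */
static void my_end_io(struct request *req, blk_status_t err)
{
	if (blk_rq_is_poll(req))
		my_complete_inline(req);	/* hypothetical helper */
	else
		my_defer_to_task(req);		/* hypothetical helper */
}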

drivers/nvme/host/core.c

Lines changed: 1 addition & 0 deletions

@@ -3976,6 +3976,7 @@ static const struct file_operations nvme_ns_chr_fops = {
 	.unlocked_ioctl	= nvme_ns_chr_ioctl,
 	.compat_ioctl	= compat_ptr_ioctl,
 	.uring_cmd	= nvme_ns_chr_uring_cmd,
+	.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
 };
 
 static int nvme_add_ns_cdev(struct nvme_ns *ns)

drivers/nvme/host/ioctl.c

Lines changed: 72 additions & 5 deletions

@@ -391,11 +391,19 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 	/* extract bio before reusing the same field for request */
 	struct bio *bio = pdu->bio;
+	void *cookie = READ_ONCE(ioucmd->cookie);
 
 	pdu->req = req;
 	req->bio = bio;
-	/* this takes care of moving rest of completion-work to task context */
-	io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+	/*
+	 * For iopoll, complete it directly.
+	 * Otherwise, move the completion to task work.
+	 */
+	if (cookie != NULL && blk_rq_is_poll(req))
+		nvme_uring_task_cb(ioucmd);
+	else
+		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
 }
 
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -445,7 +453,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		rq_flags = REQ_NOWAIT;
 		blk_flags = BLK_MQ_REQ_NOWAIT;
 	}
+	if (issue_flags & IO_URING_F_IOPOLL)
+		rq_flags |= REQ_POLLED;
 
+retry:
 	req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
 			d.data_len, nvme_to_user_ptr(d.metadata),
 			d.metadata_len, 0, &meta, d.timeout_ms ?
@@ -456,6 +467,17 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	req->end_io = nvme_uring_cmd_end_io;
 	req->end_io_data = ioucmd;
 
+	if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
+		if (unlikely(!req->bio)) {
+			/* we can't poll this, so alloc regular req instead */
+			blk_mq_free_request(req);
+			rq_flags &= ~REQ_POLLED;
+			goto retry;
+		} else {
+			WRITE_ONCE(ioucmd->cookie, req->bio);
+			req->bio->bi_opf |= REQ_POLLED;
+		}
+	}
 	/* to free bio on completion, as req->bio will be null at that time */
 	pdu->bio = req->bio;
 	pdu->meta = meta;
@@ -559,9 +581,6 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 static int nvme_uring_cmd_checks(unsigned int issue_flags)
 {
-	/* IOPOLL not supported yet */
-	if (issue_flags & IO_URING_F_IOPOLL)
-		return -EOPNOTSUPP;
 
 	/* NVMe passthrough requires big SQE/CQE support */
 	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
@@ -604,6 +623,25 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
 }
 
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+		struct io_comp_batch *iob,
+		unsigned int poll_flags)
+{
+	struct bio *bio;
+	int ret = 0;
+	struct nvme_ns *ns;
+	struct request_queue *q;
+
+	rcu_read_lock();
+	bio = READ_ONCE(ioucmd->cookie);
+	ns = container_of(file_inode(ioucmd->file)->i_cdev,
+			struct nvme_ns, cdev);
+	q = ns->queue;
+	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
+		ret = bio_poll(bio, iob, poll_flags);
+	rcu_read_unlock();
+	return ret;
+}
 #ifdef CONFIG_NVME_MULTIPATH
 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
 		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -685,13 +723,42 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
 	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
 }
+
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+		struct io_comp_batch *iob,
+		unsigned int poll_flags)
+{
+	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+	int srcu_idx = srcu_read_lock(&head->srcu);
+	struct nvme_ns *ns = nvme_find_path(head);
+	struct bio *bio;
+	int ret = 0;
+	struct request_queue *q;
+
+	if (ns) {
+		rcu_read_lock();
+		bio = READ_ONCE(ioucmd->cookie);
+		q = ns->queue;
+		if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
+				&& bio->bi_bdev)
+			ret = bio_poll(bio, iob, poll_flags);
+		rcu_read_unlock();
+	}
+	srcu_read_unlock(&head->srcu, srcu_idx);
+	return ret;
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 {
 	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
 	int ret;
 
+	/* IOPOLL not supported yet */
+	if (issue_flags & IO_URING_F_IOPOLL)
+		return -EOPNOTSUPP;
+
 	ret = nvme_uring_cmd_checks(issue_flags);
 	if (ret)
 		return ret;
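
Taken together, the ioctl.c changes let userspace reap NVMe passthrough completions by polling rather than via interrupts. A rough sketch of the setup with liburing; the struct nvme_uring_cmd payload, buffer setup, and error handling are elided, and nvme_fd is assumed to be an open /dev/ngXnY character device:

/*
 * Sketch: NVMe passthrough on an IOPOLL ring. Passthrough needs the
 * big SQE/CQE formats, and the underlying queue must support polling.
 */
struct io_uring ring;
struct io_uring_sqe *sqe;

io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL |
		    IORING_SETUP_SQE128 | IORING_SETUP_CQE32);

sqe = io_uring_get_sqe(&ring);
sqe->opcode = IORING_OP_URING_CMD;
sqe->fd = nvme_fd;
sqe->cmd_op = NVME_URING_CMD_IO;	/* from <linux/nvme_ioctl.h> */
/* fill a struct nvme_uring_cmd in the big-SQE area (sqe->cmd) */

io_uring_submit(&ring);
/* waiting on the CQ now drives ->uring_cmd_iopoll() instead of IRQs */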

drivers/nvme/host/multipath.c

Lines changed: 1 addition & 0 deletions

@@ -439,6 +439,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
 	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
 	.compat_ioctl	= compat_ptr_ioctl,
 	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
+	.uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
 };
 
 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)

drivers/nvme/host/nvme.h

Lines changed: 4 additions & 0 deletions

@@ -821,6 +821,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
 		unsigned long arg);
 long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 		unsigned long arg);
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+		struct io_comp_batch *iob, unsigned int poll_flags);
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+		struct io_comp_batch *iob, unsigned int poll_flags);
 int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
 		unsigned int issue_flags);
 int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,

fs/eventfd.c

Lines changed: 7 additions & 3 deletions

@@ -69,17 +69,17 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 	 * it returns false, the eventfd_signal() call should be deferred to a
 	 * safe context.
 	 */
-	if (WARN_ON_ONCE(current->in_eventfd_signal))
+	if (WARN_ON_ONCE(current->in_eventfd))
 		return 0;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
-	current->in_eventfd_signal = 1;
+	current->in_eventfd = 1;
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
-	current->in_eventfd_signal = 0;
+	current->in_eventfd = 0;
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
@@ -253,8 +253,10 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
 		__set_current_state(TASK_RUNNING);
 	}
 	eventfd_ctx_do_read(ctx, &ucnt);
+	current->in_eventfd = 1;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
+	current->in_eventfd = 0;
 	spin_unlock_irq(&ctx->wqh.lock);
 	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
 		return -EFAULT;
@@ -301,8 +303,10 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
 	}
 	if (likely(res > 0)) {
 		ctx->count += ucnt;
+		current->in_eventfd = 1;
 		if (waitqueue_active(&ctx->wqh))
 			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+		current->in_eventfd = 0;
 	}
 	spin_unlock_irq(&ctx->wqh.lock);
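
The flag is renamed from in_eventfd_signal to in_eventfd because it now also guards the wakeups in the read and write paths: anything running from those wakeups (io_uring's eventfd notification, for instance) sees eventfd_signal_allowed() return false and must defer rather than recurse. The expected caller pattern, with a hypothetical deferral helper:

/* Signal only when safe; otherwise punt to a context outside the
 * wakeup path (the deferral helper here is hypothetical). */
if (eventfd_signal_allowed())
	eventfd_signal(ctx, 1);
else
	defer_eventfd_signal(ctx);	/* e.g. via task_work */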

include/linux/blk-mq.h

Lines changed: 1 addition & 0 deletions

@@ -980,6 +980,7 @@ int blk_rq_map_kern(struct request_queue *, struct request *, void *,
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
 void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
+bool blk_rq_is_poll(struct request *rq);
 
 struct req_iterator {
 	struct bvec_iter iter;

include/linux/eventfd.h

Lines changed: 1 addition & 1 deletion

@@ -46,7 +46,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
 
 static inline bool eventfd_signal_allowed(void)
 {
-	return !current->in_eventfd_signal;
+	return !current->in_eventfd;
 }
 
 #else /* CONFIG_EVENTFD */

include/linux/fs.h

Lines changed: 2 additions & 0 deletions

@@ -2133,6 +2133,8 @@ struct file_operations {
 			loff_t len, unsigned int remap_flags);
 	int (*fadvise)(struct file *, loff_t, loff_t, int);
 	int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+	int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
+				unsigned int poll_flags);
 } __randomize_layout;
 
 struct inode_operations {
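
This new file_operations hook is how io_uring's IOPOLL reaping loop hands polling off to whatever driver owns a pending uring_cmd. Roughly how the core is expected to dispatch it (simplified, not the literal io_uring code):

/* Sketch: the iopoll loop asking the provider to poll one command */
struct file *file = ioucmd->file;
int ret = 0;

if (file->f_op->uring_cmd_iopoll)
	ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob, poll_flags);
/* ret > 0 means the poll made progress; completed cmds post CQEs */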

include/linux/io_uring.h

Lines changed: 6 additions & 2 deletions

@@ -20,8 +20,12 @@ enum io_uring_cmd_flags {
 struct io_uring_cmd {
 	struct file	*file;
 	const void	*cmd;
-	/* callback to defer completions to task context */
-	void (*task_work_cb)(struct io_uring_cmd *cmd);
+	union {
+		/* callback to defer completions to task context */
+		void (*task_work_cb)(struct io_uring_cmd *cmd);
+		/* used for polled completion */
+		void *cookie;
+	};
 	u32		cmd_op;
 	u32		pad;
 	u8		pdu[32]; /* available inline for free use */
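
Note the overlay: a given uring_cmd either defers completion through task_work_cb (written by io_uring_cmd_complete_in_task()) or is reaped by polling via cookie (stashed at submission time, as the nvme change above does with the request's bio). The two uses never overlap in time for one command, which is what makes sharing the storage safe.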
