
Commit fa15baf

isilence authored and axboe committed
io_uring: flip if handling after io_setup_async_rw
As recently done with send/recv, flip the if after rw_verify_area() in io_{read,write}() and re-indent the bits that are left. This removes a jump on the success/fast path that the compiler emits and the CPU can mispredict.

Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 1752f0a commit fa15baf
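
Below is a minimal, self-contained sketch of the branch flip this commit describes. It is plain C with hypothetical helpers (verify_area(), do_io(), op_before(), op_after()), not io_uring code; unlikely() is defined the way the kernel defines it. Instead of nesting the fast path under "if (!ret)", the error case jumps out and the common case falls straight through, losing one level of indentation.

#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical stand-ins for rw_verify_area() and the actual I/O call. */
static int verify_area(int len) { return len < 0 ? -1 : 0; }
static int do_io(int len)       { return len; }

/* Before: the fast path is nested under "if (!ret)", so the compiler may
 * place it behind a taken (and potentially mispredicted) jump. */
static int op_before(int len)
{
	int ret = verify_area(len);

	if (!ret) {
		ret = do_io(len);
		/* ... completion handling ... */
	}
	return ret;
}

/* After: the error check uses unlikely() and jumps to the cleanup label;
 * the success path falls straight through with no taken branch on it. */
static int op_after(int len)
{
	int ret = verify_area(len);

	if (unlikely(ret))
		goto out;
	ret = do_io(len);
	/* ... completion handling ... */
out:
	return ret;
}

int main(void)
{
	printf("%d %d\n", op_before(8), op_after(8));
	return 0;
}

Behaviour is unchanged; the flip only affects how the compiler lays out the fast path. That is why most of the diff below is re-indentation of code that moved out of the old "if (!ret)" blocks, plus the hoisted io_size/req->result assignments and the new ret2/nr_segs declarations.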

File tree

1 file changed: +72 additions, -74 deletions


fs/io_uring.c

Lines changed: 72 additions & 74 deletions
@@ -3034,57 +3034,56 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	struct kiocb *kiocb = &req->rw.kiocb;
 	struct iov_iter iter;
 	size_t iov_count;
-	ssize_t io_size, ret;
+	ssize_t io_size, ret, ret2;
+	unsigned long nr_segs;
 
 	ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	io_size = ret;
+	req->result = io_size;
 
 	/* Ensure we clear previously set non-block flag */
 	if (!force_nonblock)
 		kiocb->ki_flags &= ~IOCB_NOWAIT;
 
-	io_size = ret;
-	req->result = io_size;
-
 	/* If the file doesn't support async, just async punt */
 	if (force_nonblock && !io_file_supports_async(req->file, READ))
 		goto copy_iov;
 
 	iov_count = iov_iter_count(&iter);
+	nr_segs = iter.nr_segs;
 	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
-	if (!ret) {
-		unsigned long nr_segs = iter.nr_segs;
-		ssize_t ret2 = 0;
+	if (unlikely(ret))
+		goto out_free;
 
-		ret2 = io_iter_do_read(req, &iter);
+	ret2 = io_iter_do_read(req, &iter);
 
-		/* Catch -EAGAIN return for forced non-blocking submission */
-		if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
-			kiocb_done(kiocb, ret2, cs);
-		} else {
-			iter.count = iov_count;
-			iter.nr_segs = nr_segs;
+	/* Catch -EAGAIN return for forced non-blocking submission */
+	if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
+		kiocb_done(kiocb, ret2, cs);
+	} else {
+		iter.count = iov_count;
+		iter.nr_segs = nr_segs;
 copy_iov:
-			ret = io_setup_async_rw(req, io_size, iovec,
-						inline_vecs, &iter);
-			if (ret)
+		ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
+					&iter);
+		if (ret)
+			goto out_free;
+		/* it's copied and will be cleaned with ->io */
+		iovec = NULL;
+		/* if we can retry, do so with the callbacks armed */
+		if (io_rw_should_retry(req)) {
+			ret2 = io_iter_do_read(req, &iter);
+			if (ret2 == -EIOCBQUEUED) {
+				goto out_free;
+			} else if (ret2 != -EAGAIN) {
+				kiocb_done(kiocb, ret2, cs);
 				goto out_free;
-			/* it's copied and will be cleaned with ->io */
-			iovec = NULL;
-			/* if we can retry, do so with the callbacks armed */
-			if (io_rw_should_retry(req)) {
-				ret2 = io_iter_do_read(req, &iter);
-				if (ret2 == -EIOCBQUEUED) {
-					goto out_free;
-				} else if (ret2 != -EAGAIN) {
-					kiocb_done(kiocb, ret2, cs);
-					goto out_free;
-				}
 			}
-			kiocb->ki_flags &= ~IOCB_WAITQ;
-			return -EAGAIN;
 		}
+		kiocb->ki_flags &= ~IOCB_WAITQ;
+		return -EAGAIN;
 	}
 out_free:
 	if (iovec)
@@ -3117,19 +3116,19 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	struct kiocb *kiocb = &req->rw.kiocb;
 	struct iov_iter iter;
 	size_t iov_count;
-	ssize_t ret, io_size;
+	ssize_t ret, ret2, io_size;
+	unsigned long nr_segs;
 
 	ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	io_size = ret;
+	req->result = io_size;
 
 	/* Ensure we clear previously set non-block flag */
 	if (!force_nonblock)
 		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
-	io_size = ret;
-	req->result = io_size;
-
 	/* If the file doesn't support async, just async punt */
 	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
 		goto copy_iov;
@@ -3140,51 +3139,50 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 		goto copy_iov;
 
 	iov_count = iov_iter_count(&iter);
+	nr_segs = iter.nr_segs;
 	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
-	if (!ret) {
-		unsigned long nr_segs = iter.nr_segs;
-		ssize_t ret2;
+	if (unlikely(ret))
+		goto out_free;
 
-		/*
-		 * Open-code file_start_write here to grab freeze protection,
-		 * which will be released by another thread in
-		 * io_complete_rw(). Fool lockdep by telling it the lock got
-		 * released so that it doesn't complain about the held lock when
-		 * we return to userspace.
-		 */
-		if (req->flags & REQ_F_ISREG) {
-			__sb_start_write(file_inode(req->file)->i_sb,
-						SB_FREEZE_WRITE, true);
-			__sb_writers_release(file_inode(req->file)->i_sb,
-						SB_FREEZE_WRITE);
-		}
-		kiocb->ki_flags |= IOCB_WRITE;
+	/*
+	 * Open-code file_start_write here to grab freeze protection,
+	 * which will be released by another thread in
+	 * io_complete_rw(). Fool lockdep by telling it the lock got
+	 * released so that it doesn't complain about the held lock when
+	 * we return to userspace.
+	 */
+	if (req->flags & REQ_F_ISREG) {
+		__sb_start_write(file_inode(req->file)->i_sb,
+					SB_FREEZE_WRITE, true);
+		__sb_writers_release(file_inode(req->file)->i_sb,
+					SB_FREEZE_WRITE);
+	}
+	kiocb->ki_flags |= IOCB_WRITE;
 
-		if (req->file->f_op->write_iter)
-			ret2 = call_write_iter(req->file, kiocb, &iter);
-		else
-			ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+	if (req->file->f_op->write_iter)
+		ret2 = call_write_iter(req->file, kiocb, &iter);
+	else
+		ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
 
-		/*
-		 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
-		 * retry them without IOCB_NOWAIT.
-		 */
-		if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
-			ret2 = -EAGAIN;
-		if (!force_nonblock || ret2 != -EAGAIN) {
-			kiocb_done(kiocb, ret2, cs);
-		} else {
-			iter.count = iov_count;
-			iter.nr_segs = nr_segs;
+	/*
+	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
+	 * retry them without IOCB_NOWAIT.
+	 */
+	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
+		ret2 = -EAGAIN;
+	if (!force_nonblock || ret2 != -EAGAIN) {
+		kiocb_done(kiocb, ret2, cs);
+	} else {
+		iter.count = iov_count;
+		iter.nr_segs = nr_segs;
 copy_iov:
-			ret = io_setup_async_rw(req, io_size, iovec,
-						inline_vecs, &iter);
-			if (ret)
-				goto out_free;
-			/* it's copied and will be cleaned with ->io */
-			iovec = NULL;
-			return -EAGAIN;
-		}
+		ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
+					&iter);
+		if (ret)
+			goto out_free;
+		/* it's copied and will be cleaned with ->io */
+		iovec = NULL;
+		return -EAGAIN;
 	}
 out_free:
 	if (iovec)
