Skip to content

Commit 7fbb1b5

Browse files
isilence authored and axboe committed
io_uring: don't open-code recv kbuf management
Don't implement the fast path of kbuf freeing and management inlined into io_recv{,msg}(); that's error-prone and duplicates handling. Replace it with a helper, io_put_recv_kbuf(), which mimics io_put_rw_kbuf() in io_read/write(). This also keeps the cflags calculation in one place, removing duplication between rw and recv/send. Signed-off-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent 8ff069b commit 7fbb1b5

File tree

1 file changed

+14
-14
lines changed

1 file changed

+14
-14
lines changed

fs/io_uring.c

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -4098,7 +4098,7 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
40984098
}
40994099

41004100
static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
4101-
int *cflags, bool needs_lock)
4101+
bool needs_lock)
41024102
{
41034103
struct io_sr_msg *sr = &req->sr_msg;
41044104
struct io_buffer *kbuf;
@@ -4109,12 +4109,14 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
41094109

41104110
sr->kbuf = kbuf;
41114111
req->flags |= REQ_F_BUFFER_SELECTED;
4112-
4113-
*cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
4114-
*cflags |= IORING_CQE_F_BUFFER;
41154112
return kbuf;
41164113
}
41174114

4115+
static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4116+
{
4117+
return io_put_kbuf(req, req->sr_msg.kbuf);
4118+
}
4119+
41184120
static int io_recvmsg_prep(struct io_kiocb *req,
41194121
const struct io_uring_sqe *sqe)
41204122
{
@@ -4152,7 +4154,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
41524154
{
41534155
struct io_async_msghdr iomsg, *kmsg;
41544156
struct socket *sock;
4155-
struct io_buffer *kbuf = NULL;
4157+
struct io_buffer *kbuf;
41564158
unsigned flags;
41574159
int ret, cflags = 0;
41584160

@@ -4175,7 +4177,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
41754177
}
41764178

41774179
if (req->flags & REQ_F_BUFFER_SELECT) {
4178-
kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
4180+
kbuf = io_recv_buffer_select(req, !force_nonblock);
41794181
if (IS_ERR(kbuf))
41804182
return PTR_ERR(kbuf);
41814183
kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
@@ -4196,12 +4198,11 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
41964198
if (ret == -ERESTARTSYS)
41974199
ret = -EINTR;
41984200

4199-
if (kbuf)
4200-
kfree(kbuf);
4201+
if (req->flags & REQ_F_BUFFER_SELECTED)
4202+
cflags = io_put_recv_kbuf(req);
42014203
if (kmsg->iov != kmsg->fast_iov)
42024204
kfree(kmsg->iov);
4203-
req->flags &= ~(REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED);
4204-
4205+
req->flags &= ~REQ_F_NEED_CLEANUP;
42054206
if (ret < 0)
42064207
req_set_fail_links(req);
42074208
__io_req_complete(req, ret, cflags, cs);
@@ -4225,7 +4226,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
42254226
return ret;
42264227

42274228
if (req->flags & REQ_F_BUFFER_SELECT) {
4228-
kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
4229+
kbuf = io_recv_buffer_select(req, !force_nonblock);
42294230
if (IS_ERR(kbuf))
42304231
return PTR_ERR(kbuf);
42314232
buf = u64_to_user_ptr(kbuf->addr);
@@ -4254,9 +4255,8 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
42544255
if (ret == -ERESTARTSYS)
42554256
ret = -EINTR;
42564257
out_free:
4257-
if (kbuf)
4258-
kfree(kbuf);
4259-
req->flags &= ~REQ_F_NEED_CLEANUP;
4258+
if (req->flags & REQ_F_BUFFER_SELECTED)
4259+
cflags = io_put_recv_kbuf(req);
42604260
if (ret < 0)
42614261
req_set_fail_links(req);
42624262
__io_req_complete(req, ret, cflags, cs);

0 commit comments

Comments
 (0)