Commit 41b70df

io_uring/net: commit partial buffers on retry
Ring provided buffers are potentially only valid within the single execution
context in which they were acquired. io_uring deals with this and invalidates
them on retry. But on the networking side, if MSG_WAITALL is set, or if the
socket is of the streaming type and too little was processed, then it will
hang on to the buffer rather than recycle or commit it. This is problematic
for two reasons:

1) If someone unregisters the provided buffer ring before a later retry,
   then the req->buf_list will no longer be valid.

2) If multiple sockets are using the same buffer group, then multiple
   receives can consume the same memory. This can cause data corruption in
   the application, as either receive could land in the same userspace
   buffer.

Fix this by disallowing partial retries from pinning a provided buffer
across multiple executions, if ring provided buffers are used.

Cc: [email protected]
Reported-by: pt x <[email protected]>
Fixes: c56e022 ("io_uring: add support for user mapped provided buffer ring")
Signed-off-by: Jens Axboe <[email protected]>
1 parent 8f5ae30 commit 41b70df

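The second hazard in the commit message is easiest to see from userspace. Below is a minimal, hypothetical liburing sketch (not part of this commit; sock_a, sock_b, BGID, NR_BUFS, BUF_SZ, and the helper names are illustrative assumptions) of two sockets receiving from one shared provided-buffer group. Before this fix, a short receive on a streaming socket could pin its selected buffer across a retry, so both receives might land in the same userspace memory.

/*
 * Hypothetical sketch, assuming liburing: two sockets receiving from one
 * shared provided-buffer group. Not part of the commit.
 */
#include <liburing.h>
#include <stdlib.h>

#define BGID	7	/* arbitrary buffer group id */
#define NR_BUFS	64
#define BUF_SZ	4096

static void queue_recv(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* addr/len come from the buffer ring, not the SQE */
	io_uring_prep_recv(sqe, fd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
}

/* sock_a/sock_b are assumed to be connected stream socket fds */
int recv_sketch(int sock_a, int sock_b)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	int i, err;

	io_uring_queue_init(8, &ring, 0);
	br = io_uring_setup_buf_ring(&ring, NR_BUFS, BGID, 0, &err);
	if (!br)
		return err;

	/* Fill the ring with NR_BUFS buffers, then publish them */
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, malloc(BUF_SZ), BUF_SZ, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);

	/* Both receives select from the same group BGID */
	queue_recv(&ring, sock_a);
	queue_recv(&ring, sock_b);
	return io_uring_submit(&ring);
}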
File tree

1 file changed (+15, -12 lines)

io_uring/net.c

Lines changed: 15 additions & 12 deletions
@@ -494,6 +494,15 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
 	return nbufs;
 }
 
+static int io_net_kbuf_recyle(struct io_kiocb *req,
+			      struct io_async_msghdr *kmsg, int len)
+{
+	req->flags |= REQ_F_BL_NO_RECYCLE;
+	if (req->flags & REQ_F_BUFFERS_COMMIT)
+		io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len));
+	return IOU_RETRY;
+}
+
 static inline bool io_send_finish(struct io_kiocb *req, int *ret,
 				  struct io_async_msghdr *kmsg,
 				  unsigned issue_flags)
@@ -562,8 +571,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 			kmsg->msg.msg_controllen = 0;
 			kmsg->msg.msg_control = NULL;
 			sr->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -674,8 +682,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1071,8 +1078,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 		}
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return IOU_RETRY;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1218,8 +1224,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1500,8 +1505,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 			zc->len -= ret;
 			zc->buf += ret;
 			zc->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1571,8 +1575,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (ret > 0 && io_net_retry(sock, flags)) {
 		sr->done_io += ret;
-		req->flags |= REQ_F_BL_NO_RECYCLE;
-		return -EAGAIN;
+		return io_net_kbuf_recyle(req, kmsg, ret);
 	}
 	if (ret == -ERESTARTSYS)
 		ret = -EINTR;
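For readability, here is the new helper once more with explanatory comments added (the comments are this writeup's interpretation, based on the commit message; they are not part of the commit):

static int io_net_kbuf_recyle(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg, int len)
{
	/* Forbid the generic retry path from recycling the selected buffer */
	req->flags |= REQ_F_BL_NO_RECYCLE;
	/*
	 * If ring buffers are still pending commit, commit the partially
	 * consumed buffers now: 'len' bytes were transferred, and
	 * io_bundle_nbufs() translates that into a buffer count. A later
	 * retry then neither holds a stale req->buf_list reference nor lets
	 * another receive consume the same memory.
	 */
	if (req->flags & REQ_F_BUFFERS_COMMIT)
		io_kbuf_commit(req, req->buf_list, len,
			       io_bundle_nbufs(kmsg, len));
	/* Ask the core to retry the request */
	return IOU_RETRY;
}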
