Commit 178b8ff
io_uring/kbuf: flag partial buffer mappings
A previous commit aborted mapping more for a non-incremental ring for bundle peeking, but depending on where in the process this peeking happened, it would not necessarily prevent a retry by the user. That can create gaps in the received/read data.

Add struct buf_sel_arg->partial_map, which can pass this information back. The networking side can then map that to internal state and use it to gate retry as well.

Since this necessitates a new flag, change io_sr_msg->retry to a retry_flags member, and store both the retry and partial map condition in there.

Cc: [email protected]
Fixes: 26ec15e ("io_uring/kbuf: don't truncate end buffer for multiple buffer peeks")
Signed-off-by: Jens Axboe <[email protected]>
1 parent: 9a709b7

3 files changed: 18 additions, 9 deletions
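For context, the affected path is a bundle receive over a provided-buffer ring. The following is a minimal userspace sketch of that pattern, not part of this commit: the liburing calls and uapi flags are standard, but the group ID, buffer sizing, and error handling are illustrative, and a real program would hand bundle_recv() a connected socket.

/*
 * Sketch: bundle recv from a provided-buffer ring. Illustrative only;
 * a partial bundle mapping followed by a retry is the case the fix
 * above is concerned with.
 */
#include <liburing.h>
#include <stdlib.h>
#include <stdio.h>

#define BGID		0
#define NR_BUFS		8
#define BUF_SIZE	4096

static int bundle_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	/* recv with buffer selection from group BGID, bundle enabled */
	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	sqe->ioprio |= IORING_RECVSEND_BUNDLE;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;

	/*
	 * res is the total byte count across all buffers in the bundle;
	 * the first buffer ID is carried in cqe->flags. Before this fix,
	 * a retry after a partially mapped bundle could leave a gap in
	 * the data spread across these buffers.
	 */
	if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER))
		printf("got %d bytes, first bid %u\n", cqe->res,
		       cqe->flags >> IORING_CQE_BUFFER_SHIFT);
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	int i, ret;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* register a provided-buffer ring for group BGID */
	br = io_uring_setup_buf_ring(&ring, NR_BUFS, BGID, 0, &ret);
	if (!br)
		return 1;
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, malloc(BUF_SIZE), BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);

	/* sockfd would come from a real connection; omitted here */
	(void)bundle_recv;
	io_uring_queue_exit(&ring);
	return 0;
}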

io_uring/kbuf.c (1 addition, 0 deletions)

@@ -271,6 +271,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
 		if (len > arg->max_len) {
 			len = arg->max_len;
 			if (!(bl->flags & IOBL_INC)) {
+				arg->partial_map = 1;
 				if (iov != arg->iovs)
 					break;
 				buf->len = len;

io_uring/kbuf.h (2 additions, 1 deletion)

@@ -58,7 +58,8 @@ struct buf_sel_arg {
 	size_t max_len;
 	unsigned short nr_iovs;
 	unsigned short mode;
-	unsigned buf_group;
+	unsigned short buf_group;
+	unsigned short partial_map;
 };
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,

io_uring/net.c (15 additions, 8 deletions)

@@ -75,12 +75,17 @@ struct io_sr_msg {
 	u16				flags;
 	/* initialised and used only by !msg send variants */
 	u16				buf_group;
-	bool				retry;
+	unsigned short			retry_flags;
 	void __user			*msg_control;
 	/* used only for send zerocopy */
 	struct io_kiocb			*notif;
 };
 
+enum sr_retry_flags {
+	IO_SR_MSG_RETRY		= 1,
+	IO_SR_MSG_PARTIAL_MAP	= 2,
+};
+
 /*
  * Number of times we'll try and do receives if there's more data. If we
  * exceed this limit, then add us to the back of the queue and retry from
@@ -187,7 +192,7 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
 
 	req->flags &= ~REQ_F_BL_EMPTY;
 	sr->done_io = 0;
-	sr->retry = false;
+	sr->retry_flags = 0;
 	sr->len = 0; /* get from the provided buffer */
 }
 
@@ -397,7 +402,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
-	sr->retry = false;
+	sr->retry_flags = 0;
 	sr->len = READ_ONCE(sqe->len);
 	sr->flags = READ_ONCE(sqe->ioprio);
 	if (sr->flags & ~SENDMSG_FLAGS)
@@ -751,7 +756,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
-	sr->retry = false;
+	sr->retry_flags = 0;
 
 	if (unlikely(sqe->file_index || sqe->addr2))
 		return -EINVAL;
@@ -823,7 +828,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 
 		cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
 				       issue_flags);
-		if (sr->retry)
+		if (sr->retry_flags & IO_SR_MSG_RETRY)
 			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
 		/* bundle with no more immediate buffers, we're done */
 		if (req->flags & REQ_F_BL_EMPTY)
@@ -832,12 +837,12 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		 * If more is available AND it was a full transfer, retry and
 		 * append to this one
 		 */
-		if (!sr->retry && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
+		if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
 		    !iov_iter_count(&kmsg->msg.msg_iter)) {
 			req->cqe.flags = cflags & ~CQE_F_MASK;
 			sr->len = kmsg->msg.msg_inq;
 			sr->done_io += this_ret;
-			sr->retry = true;
+			sr->retry_flags |= IO_SR_MSG_RETRY;
 			return false;
 		}
 	} else {
@@ -1082,6 +1087,8 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 		kmsg->vec.iovec = arg.iovs;
 		req->flags |= REQ_F_NEED_CLEANUP;
 	}
+	if (arg.partial_map)
+		sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP;
 
 	/* special case 1 vec, can be a fast path */
 	if (ret == 1) {
@@ -1276,7 +1283,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	int ret;
 
 	zc->done_io = 0;
-	zc->retry = false;
+	zc->retry_flags = 0;
 
 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
 		return -EINVAL;
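Taken together, the semantics are: any bit set in retry_flags, whether a prior bundle retry or a partial mapping, now suppresses the append-retry check in io_recv_finish(). Below is a standalone toy model of just that gating logic; it reuses the kernel's names purely for illustration and is not kernel code.

/*
 * Toy model of the retry_flags gating added in this commit.
 * Compiles and runs standalone; illustrative only.
 */
#include <stdio.h>

enum sr_retry_flags {
	IO_SR_MSG_RETRY		= 1,
	IO_SR_MSG_PARTIAL_MAP	= 2,
};

struct buf_sel_arg {
	unsigned short partial_map;	/* new in this commit */
};

struct io_sr_msg {
	unsigned short retry_flags;	/* was: bool retry */
};

/* model of the peek: stops mapping early on a non-incremental ring */
static void buffers_peek(struct buf_sel_arg *arg, int hit_max_len)
{
	if (hit_max_len)
		arg->partial_map = 1;
}

/* model of io_recv_finish(): retry only if no flag is already set */
static int may_retry_bundle(struct io_sr_msg *sr, int more_data,
			    int full_transfer)
{
	if (!sr->retry_flags && more_data && full_transfer) {
		sr->retry_flags |= IO_SR_MSG_RETRY;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct buf_sel_arg arg = { 0 };
	struct io_sr_msg sr = { 0 };

	buffers_peek(&arg, 1);		/* partial mapping happened */
	if (arg.partial_map)
		sr.retry_flags |= IO_SR_MSG_PARTIAL_MAP;

	/* retry is now gated even though more data is available */
	printf("retry: %d\n", may_retry_bundle(&sr, 1, 1));	/* prints 0 */
	return 0;
}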
