Commit 6733e67

io_uring/kbuf: pass in 'len' argument for buffer commit

In preparation for needing the consumed length, pass in the length being
completed. Unused right now, but will be used when it is possible to
partially consume a buffer.

Signed-off-by: Jens Axboe <[email protected]>
Parent: 641a681
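The len argument is intentionally unused at this point in the series. As a rough illustration of where it is headed, here is a hypothetical sketch (not from this commit, and not existing kernel code at this point) of how io_kbuf_commit() might consume len once partial buffer consumption is supported; the IOBL_INC flag and the addr/len rewind are assumptions for illustration:

/*
 * Hypothetical sketch only: one way io_kbuf_commit() could use 'len'
 * once partial consumption is supported. IOBL_INC and the addr/len
 * rewind below are assumptions for illustration, not part of this
 * commit.
 */
static inline void io_kbuf_commit(struct io_kiocb *req,
                                  struct io_buffer_list *bl, int len, int nr)
{
        if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
                return;

        if (bl->flags & IOBL_INC) {
                struct io_uring_buf *buf;

                buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
                if (len > 0 && len < buf->len) {
                        /* consumed part of the buffer, leave the rest ringed */
                        buf->addr += len;
                        buf->len -= len;
                        req->flags &= ~REQ_F_BUFFERS_COMMIT;
                        return;
                }
        }

        bl->head += nr;
        req->flags &= ~REQ_F_BUFFERS_COMMIT;
}

The key idea is that a short transfer would advance the buffer's addr/len in place rather than bumping bl->head, leaving the unconsumed tail available for the next request.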

5 files changed (+31, -30)


io_uring/io_uring.c

Lines changed: 1 addition & 1 deletion
@@ -904,7 +904,7 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
         lockdep_assert_held(&req->ctx->uring_lock);
 
         req_set_fail(req);
-        io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
+        io_req_set_res(req, res, io_put_kbuf(req, res, IO_URING_F_UNLOCKED));
         if (def->fail)
                 def->fail(req);
         io_req_complete_defer(req);

io_uring/kbuf.c

Lines changed: 5 additions & 5 deletions
@@ -70,7 +70,7 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
         return true;
 }
 
-void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
 {
         /*
          * We can add this buffer back to two lists:
@@ -88,12 +88,12 @@ void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
                 struct io_ring_ctx *ctx = req->ctx;
 
                 spin_lock(&ctx->completion_lock);
-                __io_put_kbuf_list(req, &ctx->io_buffers_comp);
+                __io_put_kbuf_list(req, len, &ctx->io_buffers_comp);
                 spin_unlock(&ctx->completion_lock);
         } else {
                 lockdep_assert_held(&req->ctx->uring_lock);
 
-                __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+                __io_put_kbuf_list(req, len, &req->ctx->io_buffers_cache);
         }
 }
 
@@ -165,7 +165,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
                  * the transfer completes (or if we get -EAGAIN and must poll of
                  * retry).
                  */
-                io_kbuf_commit(req, bl, 1);
+                io_kbuf_commit(req, bl, *len, 1);
                 req->buf_list = NULL;
         }
         return u64_to_user_ptr(buf->addr);
@@ -291,7 +291,7 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
                  */
                 if (ret > 0) {
                         req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
-                        io_kbuf_commit(req, bl, ret);
+                        io_kbuf_commit(req, bl, arg->out_len, ret);
                 }
         } else {
                 ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
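Note that the two kbuf.c commit sites pass different lengths: io_ring_buffer_select() commits a single buffer with the selected length *len, while the bundle path in io_buffers_select() commits ret buffers at once and passes arg->out_len, the total byte count spanned by the selected iovecs.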

io_uring/kbuf.h

Lines changed: 17 additions & 16 deletions
@@ -77,7 +77,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
 
-void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);
 
 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
 
@@ -125,30 +125,30 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 #define io_ring_head_to_buf(br, head, mask) &(br)->bufs[(head) & (mask)]
 
 static inline void io_kbuf_commit(struct io_kiocb *req,
-                                  struct io_buffer_list *bl, int nr)
+                                  struct io_buffer_list *bl, int len, int nr)
 {
         if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
                 return;
         bl->head += nr;
         req->flags &= ~REQ_F_BUFFERS_COMMIT;
 }
 
-static inline void __io_put_kbuf_ring(struct io_kiocb *req, int nr)
+static inline void __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
 {
         struct io_buffer_list *bl = req->buf_list;
 
         if (bl) {
-                io_kbuf_commit(req, bl, nr);
+                io_kbuf_commit(req, bl, len, nr);
                 req->buf_index = bl->bgid;
         }
         req->flags &= ~REQ_F_BUFFER_RING;
 }
 
-static inline void __io_put_kbuf_list(struct io_kiocb *req,
+static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
                                       struct list_head *list)
 {
         if (req->flags & REQ_F_BUFFER_RING) {
-                __io_put_kbuf_ring(req, 1);
+                __io_put_kbuf_ring(req, len, 1);
         } else {
                 req->buf_index = req->kbuf->bgid;
                 list_add(&req->kbuf->list, list);
@@ -163,11 +163,12 @@ static inline void io_kbuf_drop(struct io_kiocb *req)
         if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
                 return;
 
-        __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+        /* len == 0 is fine here, non-ring will always drop all of it */
+        __io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
 }
 
-static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,
-                                          unsigned issue_flags)
+static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
+                                          int nbufs, unsigned issue_flags)
 {
         unsigned int ret;
 
@@ -176,21 +177,21 @@ static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,
 
         ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
         if (req->flags & REQ_F_BUFFER_RING)
-                __io_put_kbuf_ring(req, nbufs);
+                __io_put_kbuf_ring(req, len, nbufs);
         else
-                __io_put_kbuf(req, issue_flags);
+                __io_put_kbuf(req, len, issue_flags);
         return ret;
 }
 
-static inline unsigned int io_put_kbuf(struct io_kiocb *req,
+static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
                                        unsigned issue_flags)
 {
-        return __io_put_kbufs(req, 1, issue_flags);
+        return __io_put_kbufs(req, len, 1, issue_flags);
 }
 
-static inline unsigned int io_put_kbufs(struct io_kiocb *req, int nbufs,
-                                        unsigned issue_flags)
+static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
+                                        int nbufs, unsigned issue_flags)
 {
-        return __io_put_kbufs(req, nbufs, issue_flags);
+        return __io_put_kbufs(req, len, nbufs, issue_flags);
 }
 #endif
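For orientation, the completed length now threads through the whole put path; a condensed view of the call chain after this patch (len is plumbed through each level but still ignored by io_kbuf_commit() itself):

    io_put_kbuf(req, len, issue_flags)              /* io_put_kbufs() for bundles */
        __io_put_kbufs(req, len, nbufs, issue_flags)
            __io_put_kbuf_ring(req, len, nbufs)     /* REQ_F_BUFFER_RING */
                io_kbuf_commit(req, bl, len, nr)
            __io_put_kbuf(req, len, issue_flags)    /* legacy provided buffers */
                __io_put_kbuf_list(req, len, list)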

io_uring/net.c

Lines changed: 4 additions & 4 deletions
@@ -497,11 +497,11 @@ static inline bool io_send_finish(struct io_kiocb *req, int *ret,
         unsigned int cflags;
 
         if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
-                cflags = io_put_kbuf(req, issue_flags);
+                cflags = io_put_kbuf(req, *ret, issue_flags);
                 goto finish;
         }
 
-        cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret), issue_flags);
+        cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
 
         if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
                 goto finish;
@@ -842,13 +842,13 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
                 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
         if (sr->flags & IORING_RECVSEND_BUNDLE) {
-                cflags |= io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+                cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
                                       issue_flags);
                 /* bundle with no more immediate buffers, we're done */
                 if (req->flags & REQ_F_BL_EMPTY)
                         goto finish;
         } else {
-                cflags |= io_put_kbuf(req, issue_flags);
+                cflags |= io_put_kbuf(req, *ret, issue_flags);
         }
 
         /*

io_uring/rw.c

Lines changed: 4 additions & 4 deletions
@@ -511,7 +511,7 @@ void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
         io_req_io_end(req);
 
         if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
-                req->cqe.flags |= io_put_kbuf(req, 0);
+                req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);
 
         io_req_rw_cleanup(req, 0);
         io_req_task_complete(req, ts);
@@ -593,7 +593,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                  */
                 io_req_io_end(req);
                 io_req_set_res(req, final_ret,
-                               io_put_kbuf(req, issue_flags));
+                               io_put_kbuf(req, ret, issue_flags));
                 io_req_rw_cleanup(req, issue_flags);
                 return IOU_OK;
         }
@@ -975,7 +975,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
                  * Put our buffer and post a CQE. If we fail to post a CQE, then
                  * jump to the termination path. This request is then done.
                  */
-                cflags = io_put_kbuf(req, issue_flags);
+                cflags = io_put_kbuf(req, ret, issue_flags);
                 rw->len = 0; /* similarly to above, reset len to 0 */
 
                 if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
@@ -1167,7 +1167,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                 if (!smp_load_acquire(&req->iopoll_completed))
                         break;
                 nr_events++;
-                req->cqe.flags = io_put_kbuf(req, 0);
+                req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
                 if (req->opcode != IORING_OP_URING_CMD)
                         io_req_rw_cleanup(req, 0);
         }
