Skip to content

Commit 5af1d13

Browse files
isilence authored and axboe committed
io_uring: batch put_task_struct()
As every iopoll request have a task ref, it becomes expensive to put them one by one, instead we can put several at once integrating that into io_req_free_batch(). Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent dd6f843 commit 5af1d13

File tree

1 file changed

+27
-2
lines changed

1 file changed

+27
-2
lines changed

fs/io_uring.c

Lines changed: 27 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1544,7 +1544,6 @@ static void io_dismantle_req(struct io_kiocb *req)
15441544
kfree(req->io);
15451545
if (req->file)
15461546
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
1547-
__io_put_req_task(req);
15481547
io_req_clean_work(req);
15491548

15501549
if (req->flags & REQ_F_INFLIGHT) {
@@ -1564,6 +1563,7 @@ static void __io_free_req(struct io_kiocb *req)
15641563
struct io_ring_ctx *ctx;
15651564

15661565
io_dismantle_req(req);
1566+
__io_put_req_task(req);
15671567
ctx = req->ctx;
15681568
if (likely(!io_is_fallback_req(req)))
15691569
kmem_cache_free(req_cachep, req);
@@ -1807,8 +1807,18 @@ static void io_free_req(struct io_kiocb *req)
18071807
struct req_batch {
18081808
void *reqs[IO_IOPOLL_BATCH];
18091809
int to_free;
1810+
1811+
struct task_struct *task;
1812+
int task_refs;
18101813
};
18111814

1815+
static inline void io_init_req_batch(struct req_batch *rb)
1816+
{
1817+
rb->to_free = 0;
1818+
rb->task_refs = 0;
1819+
rb->task = NULL;
1820+
}
1821+
18121822
static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
18131823
struct req_batch *rb)
18141824
{
@@ -1822,6 +1832,10 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
18221832
{
18231833
if (rb->to_free)
18241834
__io_req_free_batch_flush(ctx, rb);
1835+
if (rb->task) {
1836+
put_task_struct_many(rb->task, rb->task_refs);
1837+
rb->task = NULL;
1838+
}
18251839
}
18261840

18271841
static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
@@ -1833,6 +1847,17 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
18331847
if (req->flags & REQ_F_LINK_HEAD)
18341848
io_queue_next(req);
18351849

1850+
if (req->flags & REQ_F_TASK_PINNED) {
1851+
if (req->task != rb->task) {
1852+
if (rb->task)
1853+
put_task_struct_many(rb->task, rb->task_refs);
1854+
rb->task = req->task;
1855+
rb->task_refs = 0;
1856+
}
1857+
rb->task_refs++;
1858+
req->flags &= ~REQ_F_TASK_PINNED;
1859+
}
1860+
18361861
io_dismantle_req(req);
18371862
rb->reqs[rb->to_free++] = req;
18381863
if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
@@ -1978,7 +2003,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
19782003
/* order with ->result store in io_complete_rw_iopoll() */
19792004
smp_rmb();
19802005

1981-
rb.to_free = 0;
2006+
io_init_req_batch(&rb);
19822007
while (!list_empty(done)) {
19832008
int cflags = 0;
19842009

0 commit comments

Comments (0)