Skip to content

Commit e139a1e

Browse files
isilence authored and axboe committed
io_uring: apply max_workers limit to all future users
Currently, IORING_REGISTER_IOWQ_MAX_WORKERS applies only to the task that issued it, it's unexpected for users. If one task creates a ring, limits workers and then passes it to another task the limit won't be applied to the other task. Another pitfall is that a task should either create a ring or submit at least one request for IORING_REGISTER_IOWQ_MAX_WORKERS to work at all, furher complicating the picture. Change the API, save the limits and apply to all future users. Note, it should be done first before giving away the ring or submitting new requests otherwise the result is not guaranteed. Fixes: 2e48005 ("io-wq: provide a way to limit max number of workers") Link: axboe/liburing#460 Reported-by: Beld Zhang <[email protected]> Signed-off-by: Pavel Begunkov <[email protected]> Link: https://lore.kernel.org/r/51d0bae97180e08ab722c0d5c93e7439cfb6f697.1634683237.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <[email protected]>
1 parent bc36992 commit e139a1e

File tree

1 file changed

+23
-6
lines changed

1 file changed

+23
-6
lines changed

fs/io_uring.c

Lines changed: 23 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -456,6 +456,8 @@ struct io_ring_ctx {
456456
struct work_struct exit_work;
457457
struct list_head tctx_list;
458458
struct completion ref_comp;
459+
u32 iowq_limits[2];
460+
bool iowq_limits_set;
459461
};
460462
};
461463

@@ -9638,7 +9640,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
96389640
ret = io_uring_alloc_task_context(current, ctx);
96399641
if (unlikely(ret))
96409642
return ret;
9643+
96419644
tctx = current->io_uring;
9645+
if (ctx->iowq_limits_set) {
9646+
unsigned int limits[2] = { ctx->iowq_limits[0],
9647+
ctx->iowq_limits[1], };
9648+
9649+
ret = io_wq_max_workers(tctx->io_wq, limits);
9650+
if (ret)
9651+
return ret;
9652+
}
96429653
}
96439654
if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
96449655
node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -10674,13 +10685,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
1067410685
tctx = current->io_uring;
1067510686
}
1067610687

10677-
ret = -EINVAL;
10678-
if (!tctx || !tctx->io_wq)
10679-
goto err;
10688+
BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
1068010689

10681-
ret = io_wq_max_workers(tctx->io_wq, new_count);
10682-
if (ret)
10683-
goto err;
10690+
memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
10691+
ctx->iowq_limits_set = true;
10692+
10693+
ret = -EINVAL;
10694+
if (tctx && tctx->io_wq) {
10695+
ret = io_wq_max_workers(tctx->io_wq, new_count);
10696+
if (ret)
10697+
goto err;
10698+
} else {
10699+
memset(new_count, 0, sizeof(new_count));
10700+
}
1068410701

1068510702
if (sqd) {
1068610703
mutex_unlock(&sqd->lock);

0 commit comments

Comments
 (0)