
Commit 8539429

Merge branch 'io_uring-5.5' into for-linus

* io_uring-5.5:
  io_uring: fix a typo in a comment
  io_uring: hook all linked requests via link_list
  io_uring: fix error handling in io_queue_link_head
  io_uring: use hash table for poll command lookups
2 parents ece841a + 0b4295b commit 8539429
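
The headline change in this pull is the last patch: poll requests that may need to be cancelled by user_data are now kept in a table of hashed list buckets instead of an rbtree keyed on user_data. Below is a minimal userspace sketch of that bucketed-lookup pattern for readers who want the idea outside the kernel; the names and types here (poll_req, cancel_table, hash_key) are illustrative stand-ins, not the kernel's hlist/hash_long() API, and the diff further down is the authoritative version.

/*
 * Minimal userspace sketch of the bucketed cancel lookup adopted by the
 * "use hash table for poll command lookups" patch.  Everything here
 * (poll_req, cancel_table, hash_key) is an illustrative stand-in, not the
 * kernel's hlist/hash_long() API.
 */
#include <stdint.h>
#include <stdio.h>

#define CANCEL_HASH_BITS 7                          /* 128 buckets */
#define CANCEL_HASH_SIZE (1U << CANCEL_HASH_BITS)

struct poll_req {
	uint64_t user_data;          /* key used to cancel a pending poll */
	struct poll_req *next;       /* singly linked bucket chain */
};

static struct poll_req *cancel_table[CANCEL_HASH_SIZE];

/* Toy mixing function standing in for hash_long(). */
static unsigned hash_key(uint64_t key)
{
	return (unsigned)((key * 0x9E3779B97F4A7C15ULL) >> (64 - CANCEL_HASH_BITS));
}

/* O(1) insertion at the head of one bucket, like hlist_add_head(). */
static void insert_req(struct poll_req *req)
{
	unsigned b = hash_key(req->user_data);

	req->next = cancel_table[b];
	cancel_table[b] = req;
}

/* Cancel-by-user_data scans a single bucket instead of a global structure. */
static struct poll_req *cancel_req(uint64_t user_data)
{
	unsigned b = hash_key(user_data);
	struct poll_req **pp;

	for (pp = &cancel_table[b]; *pp; pp = &(*pp)->next) {
		if ((*pp)->user_data == user_data) {
			struct poll_req *found = *pp;

			*pp = found->next;   /* unlink, like hash_del() */
			return found;
		}
	}
	return NULL;
}

int main(void)
{
	struct poll_req a = { .user_data = 0x1000 }, b = { .user_data = 0x2000 };

	insert_req(&a);
	insert_req(&b);
	printf("cancel 0x2000: %s\n", cancel_req(0x2000) ? "found" : "missing");
	printf("cancel 0x3000: %s\n", cancel_req(0x3000) ? "found" : "missing");
	return 0;
}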

1 file changed: 69 additions, 71 deletions

fs/io_uring.c

Lines changed: 69 additions & 71 deletions
@@ -145,7 +145,7 @@ struct io_rings {
 	/*
 	 * Number of completion events lost because the queue was full;
 	 * this should be avoided by the application by making sure
-	 * there are not more requests pending thatn there is space in
+	 * there are not more requests pending than there is space in
 	 * the completion queue.
 	 *
 	 * Written by the kernel, shouldn't be modified by the
@@ -275,7 +275,8 @@ struct io_ring_ctx {
 	 * manipulate the list, hence no extra locking is needed there.
 	 */
 	struct list_head	poll_list;
-	struct rb_root		cancel_tree;
+	struct hlist_head	*cancel_hash;
+	unsigned		cancel_hash_bits;
 
 	spinlock_t		inflight_lock;
 	struct list_head	inflight_list;
@@ -355,7 +356,7 @@ struct io_kiocb {
 	struct io_ring_ctx	*ctx;
 	union {
 		struct list_head	list;
-		struct rb_node		rb_node;
+		struct hlist_node	hash_node;
 	};
 	struct list_head	link_list;
 	unsigned int		flags;
@@ -444,6 +445,7 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 {
 	struct io_ring_ctx *ctx;
+	int hash_bits;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -457,6 +459,21 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	if (!ctx->completions)
 		goto err;
 
+	/*
+	 * Use 5 bits less than the max cq entries, that should give us around
+	 * 32 entries per hash list if totally full and uniformly spread.
+	 */
+	hash_bits = ilog2(p->cq_entries);
+	hash_bits -= 5;
+	if (hash_bits <= 0)
+		hash_bits = 1;
+	ctx->cancel_hash_bits = hash_bits;
+	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
+					GFP_KERNEL);
+	if (!ctx->cancel_hash)
+		goto err;
+	__hash_init(ctx->cancel_hash, 1U << hash_bits);
+
 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
 			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
 		goto err;
@@ -470,7 +487,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	init_waitqueue_head(&ctx->wait);
 	spin_lock_init(&ctx->completion_lock);
 	INIT_LIST_HEAD(&ctx->poll_list);
-	ctx->cancel_tree = RB_ROOT;
 	INIT_LIST_HEAD(&ctx->defer_list);
 	INIT_LIST_HEAD(&ctx->timeout_list);
 	init_waitqueue_head(&ctx->inflight_wait);
@@ -481,6 +497,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	if (ctx->fallback_req)
 		kmem_cache_free(req_cachep, ctx->fallback_req);
 	kfree(ctx->completions);
+	kfree(ctx->cancel_hash);
 	kfree(ctx);
 	return NULL;
 }
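
A quick check on the sizing logic in io_ring_ctx_alloc() above, using an illustrative ring with p->cq_entries = 4096 (an assumed value for the example, not one fixed by the commit): ilog2(4096) = 12, minus 5 gives hash_bits = 7, so the table gets 1U << 7 = 128 buckets, and a completely full CQ spread uniformly over them averages 4096 / 128 = 32 entries per list, which is the figure the new comment cites. The hash_bits <= 0 clamp covers small rings where cq_entries is below 64.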
@@ -899,7 +916,6 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
 static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_kiocb *nxt;
 	bool wake_ev = false;
 
 	/* Already got next link */
@@ -911,24 +927,21 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 	 * potentially happen if the chain is messed up, check to be on the
 	 * safe side.
 	 */
-	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
-	while (nxt) {
-		list_del_init(&nxt->list);
+	while (!list_empty(&req->link_list)) {
+		struct io_kiocb *nxt = list_first_entry(&req->link_list,
+						struct io_kiocb, link_list);
 
-		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
-		    (nxt->flags & REQ_F_TIMEOUT)) {
+		if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
+			     (nxt->flags & REQ_F_TIMEOUT))) {
+			list_del_init(&nxt->link_list);
 			wake_ev |= io_link_cancel_timeout(nxt);
-			nxt = list_first_entry_or_null(&req->link_list,
-							struct io_kiocb, list);
 			req->flags &= ~REQ_F_LINK_TIMEOUT;
 			continue;
 		}
-		if (!list_empty(&req->link_list)) {
-			INIT_LIST_HEAD(&nxt->link_list);
-			list_splice(&req->link_list, &nxt->link_list);
-			nxt->flags |= REQ_F_LINK;
-		}
 
+		list_del_init(&req->link_list);
+		if (!list_empty(&nxt->link_list))
+			nxt->flags |= REQ_F_LINK;
 		*nxtptr = nxt;
 		break;
 	}
@@ -944,15 +957,15 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 static void io_fail_links(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_kiocb *link;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ctx->completion_lock, flags);
 
 	while (!list_empty(&req->link_list)) {
-		link = list_first_entry(&req->link_list, struct io_kiocb, list);
-		list_del_init(&link->list);
+		struct io_kiocb *link = list_first_entry(&req->link_list,
+						struct io_kiocb, link_list);
 
+		list_del_init(&link->link_list);
 		trace_io_uring_fail_link(req, link);
 
 		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
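
The hunks above in io_req_link_next() and io_fail_links() are the link_list rework: a chain of linked requests is no longer advanced by splicing the head's private list onto the next request; instead every request in a chain sits on one shared list, and the next link is found by unlinking an entry and taking what is now first. A simplified userspace sketch of that intrusive-list pattern follows; the lnode helpers and struct names are hypothetical stand-ins for the kernel's list_del_init()/list_first_entry(), and it shows only the data-structure idea, not the exact control flow of io_req_link_next().

/*
 * Simplified userspace sketch of the "one shared list per chain" layout the
 * link_list rework moves to.  The lnode helpers are hypothetical stand-ins
 * for the kernel's list helpers; only the data-structure idea is shown.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct lnode { struct lnode *prev, *next; };

static void list_init(struct lnode *n)        { n->prev = n->next = n; }
static bool list_empty(const struct lnode *h) { return h->next == h; }

static void list_add_tail(struct lnode *n, struct lnode *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct lnode *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

struct req {
	int id;
	struct lnode link;	/* membership in the chain, like link_list */
};

int main(void)
{
	struct lnode chain;	/* the head request's list anchor */
	struct req r1 = { .id = 1 }, r2 = { .id = 2 }, r3 = { .id = 3 };

	list_init(&chain);
	list_add_tail(&r1.link, &chain);
	list_add_tail(&r2.link, &chain);
	list_add_tail(&r3.link, &chain);

	/*
	 * Because every request is threaded on the same list, advancing the
	 * chain is just "unlink an entry and look at the new first one";
	 * no list_splice() onto the next request is needed.
	 */
	while (!list_empty(&chain)) {
		struct req *nxt = (struct req *)((char *)chain.next -
						 offsetof(struct req, link));

		list_del_init(&nxt->link);
		printf("next link: req %d\n", nxt->id);
	}
	return 0;
}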
@@ -2260,14 +2273,6 @@ static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 #endif
 }
 
-static inline void io_poll_remove_req(struct io_kiocb *req)
-{
-	if (!RB_EMPTY_NODE(&req->rb_node)) {
-		rb_erase(&req->rb_node, &req->ctx->cancel_tree);
-		RB_CLEAR_NODE(&req->rb_node);
-	}
-}
-
 static void io_poll_remove_one(struct io_kiocb *req)
 {
 	struct io_poll_iocb *poll = &req->poll;
@@ -2279,36 +2284,34 @@ static void io_poll_remove_one(struct io_kiocb *req)
 		io_queue_async_work(req);
 	}
 	spin_unlock(&poll->head->lock);
-	io_poll_remove_req(req);
+	hash_del(&req->hash_node);
 }
 
 static void io_poll_remove_all(struct io_ring_ctx *ctx)
 {
-	struct rb_node *node;
+	struct hlist_node *tmp;
 	struct io_kiocb *req;
+	int i;
 
 	spin_lock_irq(&ctx->completion_lock);
-	while ((node = rb_first(&ctx->cancel_tree)) != NULL) {
-		req = rb_entry(node, struct io_kiocb, rb_node);
-		io_poll_remove_one(req);
+	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
+		struct hlist_head *list;
+
+		list = &ctx->cancel_hash[i];
+		hlist_for_each_entry_safe(req, tmp, list, hash_node)
+			io_poll_remove_one(req);
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
 {
-	struct rb_node *p, *parent = NULL;
+	struct hlist_head *list;
 	struct io_kiocb *req;
 
-	p = ctx->cancel_tree.rb_node;
-	while (p) {
-		parent = p;
-		req = rb_entry(parent, struct io_kiocb, rb_node);
-		if (sqe_addr < req->user_data) {
-			p = p->rb_left;
-		} else if (sqe_addr > req->user_data) {
-			p = p->rb_right;
-		} else {
+	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
+	hlist_for_each_entry(req, list, hash_node) {
+		if (sqe_addr == req->user_data) {
 			io_poll_remove_one(req);
 			return 0;
 		}
@@ -2390,7 +2393,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 		spin_unlock_irq(&ctx->completion_lock);
 		return;
 	}
-	io_poll_remove_req(req);
+	hash_del(&req->hash_node);
 	io_poll_complete(req, mask, ret);
 	spin_unlock_irq(&ctx->completion_lock);
 
@@ -2425,7 +2428,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	 * for finalizing the request, mark us as having grabbed that already.
 	 */
 	if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
-		io_poll_remove_req(req);
+		hash_del(&req->hash_node);
 		io_poll_complete(req, mask, 0);
 		req->flags |= REQ_F_COMP_LOCKED;
 		io_put_req(req);
@@ -2463,20 +2466,10 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 static void io_poll_req_insert(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct rb_node **p = &ctx->cancel_tree.rb_node;
-	struct rb_node *parent = NULL;
-	struct io_kiocb *tmp;
-
-	while (*p) {
-		parent = *p;
-		tmp = rb_entry(parent, struct io_kiocb, rb_node);
-		if (req->user_data < tmp->user_data)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	rb_link_node(&req->rb_node, parent, p);
-	rb_insert_color(&req->rb_node, &ctx->cancel_tree);
+	struct hlist_head *list;
+
+	list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
+	hlist_add_head(&req->hash_node, list);
 }
 
 static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
@@ -2504,7 +2497,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	INIT_IO_WORK(&req->work, io_poll_complete_work);
 	events = READ_ONCE(sqe->poll_events);
 	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
-	RB_CLEAR_NODE(&req->rb_node);
+	INIT_HLIST_NODE(&req->hash_node);
 
 	poll->head = NULL;
 	poll->done = false;
@@ -3173,10 +3166,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	 * We don't expect the list to be empty, that will only happen if we
 	 * race with the completion of the linked work.
 	 */
-	if (!list_empty(&req->list)) {
-		prev = list_entry(req->list.prev, struct io_kiocb, link_list);
+	if (!list_empty(&req->link_list)) {
+		prev = list_entry(req->link_list.prev, struct io_kiocb,
+				  link_list);
 		if (refcount_inc_not_zero(&prev->refs)) {
-			list_del_init(&req->list);
+			list_del_init(&req->link_list);
 			prev->flags &= ~REQ_F_LINK_TIMEOUT;
 		} else
 			prev = NULL;
@@ -3206,7 +3200,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 	 * we got a chance to setup the timer
 	 */
 	spin_lock_irq(&ctx->completion_lock);
-	if (!list_empty(&req->list)) {
+	if (!list_empty(&req->link_list)) {
 		struct io_timeout_data *data = &req->io->timeout;
 
 		data->timer.function = io_link_timeout_fn;
@@ -3226,7 +3220,8 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
 	if (!(req->flags & REQ_F_LINK))
 		return NULL;
 
-	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
+	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
+					link_list);
 	if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT)
 		return NULL;
 
33183313

33193314
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
33203315

3321-
static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
3316+
static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
33223317
struct io_kiocb **link)
33233318
{
33243319
struct io_ring_ctx *ctx = req->ctx;
@@ -3337,7 +3332,7 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
33373332
err_req:
33383333
io_cqring_add_event(req, ret);
33393334
io_double_put_req(req);
3340-
return;
3335+
return false;
33413336
}
33423337

33433338
/*
@@ -3367,7 +3362,7 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
33673362
goto err_req;
33683363
}
33693364
trace_io_uring_link(ctx, req, prev);
3370-
list_add_tail(&req->list, &prev->link_list);
3365+
list_add_tail(&req->link_list, &prev->link_list);
33713366
} else if (req->sqe->flags & IOSQE_IO_LINK) {
33723367
req->flags |= REQ_F_LINK;
33733368

@@ -3376,6 +3371,8 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
33763371
} else {
33773372
io_queue_sqe(req);
33783373
}
3374+
3375+
return true;
33793376
}
33803377

33813378
/*
@@ -3505,6 +3502,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
35053502
}
35063503
}
35073504

3505+
submitted++;
35083506
sqe_flags = req->sqe->flags;
35093507

35103508
req->ring_file = ring_file;
@@ -3514,9 +3512,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
35143512
req->needs_fixed_file = async;
35153513
trace_io_uring_submit_sqe(ctx, req->sqe->user_data,
35163514
true, async);
3517-
io_submit_sqe(req, statep, &link);
3518-
submitted++;
3519-
3515+
if (!io_submit_sqe(req, statep, &link))
3516+
break;
35203517
/*
35213518
* If previous wasn't linked and we have a linked command,
35223519
* that's the end of the chain. Submit the previous link.
@@ -4644,6 +4641,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
46444641
free_uid(ctx->user);
46454642
put_cred(ctx->creds);
46464643
kfree(ctx->completions);
4644+
kfree(ctx->cancel_hash);
46474645
kmem_cache_free(req_cachep, ctx->fallback_req);
46484646
kfree(ctx);
46494647
}
