Skip to content

Commit a581387

Browse files
committed
Merge tag 'io_uring-5.8-2020-07-10' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:

 - Fix memleak for error path in registered files (Yang)

 - Export CQ overflow state in flags, necessary to fix a case where
   liburing doesn't know if it needs to enter the kernel (Xiaoguang)

 - Fix for a regression in when user memory is accounted freed, causing
   issues with back-to-back ring exit + init if the ulimit -l setting is
   very tight.

* tag 'io_uring-5.8-2020-07-10' of git://git.kernel.dk/linux-block:
  io_uring: account user memory freed when exit has been queued
  io_uring: fix memleak in io_sqe_files_register()
  io_uring: fix memleak in __io_sqe_files_update()
  io_uring: export cq overflow status to userspace
2 parents d33db70 + 309fc03 commit a581387

File tree

2 files changed

+24
-6
lines changed

2 files changed

+24
-6
lines changed

fs/io_uring.c

Lines changed: 23 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1274,6 +1274,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
12741274
if (cqe) {
12751275
clear_bit(0, &ctx->sq_check_overflow);
12761276
clear_bit(0, &ctx->cq_check_overflow);
1277+
ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
12771278
}
12781279
spin_unlock_irqrestore(&ctx->completion_lock, flags);
12791280
io_cqring_ev_posted(ctx);
@@ -1311,6 +1312,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
13111312
if (list_empty(&ctx->cq_overflow_list)) {
13121313
set_bit(0, &ctx->sq_check_overflow);
13131314
set_bit(0, &ctx->cq_check_overflow);
1315+
ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
13141316
}
13151317
req->flags |= REQ_F_OVERFLOW;
13161318
refcount_inc(&req->refs);
@@ -6080,9 +6082,9 @@ static int io_sq_thread(void *data)
60806082
}
60816083

60826084
/* Tell userspace we may need a wakeup call */
6085+
spin_lock_irq(&ctx->completion_lock);
60836086
ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6084-
/* make sure to read SQ tail after writing flags */
6085-
smp_mb();
6087+
spin_unlock_irq(&ctx->completion_lock);
60866088

60876089
to_submit = io_sqring_entries(ctx);
60886090
if (!to_submit || ret == -EBUSY) {
@@ -6100,13 +6102,17 @@ static int io_sq_thread(void *data)
61006102
schedule();
61016103
finish_wait(&ctx->sqo_wait, &wait);
61026104

6105+
spin_lock_irq(&ctx->completion_lock);
61036106
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6107+
spin_unlock_irq(&ctx->completion_lock);
61046108
ret = 0;
61056109
continue;
61066110
}
61076111
finish_wait(&ctx->sqo_wait, &wait);
61086112

6113+
spin_lock_irq(&ctx->completion_lock);
61096114
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6115+
spin_unlock_irq(&ctx->completion_lock);
61106116
}
61116117

61126118
mutex_lock(&ctx->uring_lock);
@@ -6693,6 +6699,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
66936699
for (i = 0; i < nr_tables; i++)
66946700
kfree(ctx->file_data->table[i].files);
66956701

6702+
percpu_ref_exit(&ctx->file_data->refs);
66966703
kfree(ctx->file_data->table);
66976704
kfree(ctx->file_data);
66986705
ctx->file_data = NULL;
@@ -6845,8 +6852,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
68456852
}
68466853
table->files[index] = file;
68476854
err = io_sqe_file_register(ctx, file, i);
6848-
if (err)
6855+
if (err) {
6856+
fput(file);
68496857
break;
6858+
}
68506859
}
68516860
nr_args--;
68526861
done++;
@@ -7342,9 +7351,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
73427351
io_mem_free(ctx->sq_sqes);
73437352

73447353
percpu_ref_exit(&ctx->refs);
7345-
if (ctx->account_mem)
7346-
io_unaccount_mem(ctx->user,
7347-
ring_pages(ctx->sq_entries, ctx->cq_entries));
73487354
free_uid(ctx->user);
73497355
put_cred(ctx->creds);
73507356
kfree(ctx->cancel_hash);
@@ -7429,6 +7435,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
74297435
if (ctx->rings)
74307436
io_cqring_overflow_flush(ctx, true);
74317437
idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
7438+
7439+
/*
7440+
* Do this upfront, so we won't have a grace period where the ring
7441+
* is closed but resources aren't reaped yet. This can cause
7442+
* spurious failure in setting up a new ring.
7443+
*/
7444+
if (ctx->account_mem)
7445+
io_unaccount_mem(ctx->user,
7446+
ring_pages(ctx->sq_entries, ctx->cq_entries));
7447+
74327448
INIT_WORK(&ctx->exit_work, io_ring_exit_work);
74337449
queue_work(system_wq, &ctx->exit_work);
74347450
}
@@ -7488,6 +7504,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
74887504
if (list_empty(&ctx->cq_overflow_list)) {
74897505
clear_bit(0, &ctx->sq_check_overflow);
74907506
clear_bit(0, &ctx->cq_check_overflow);
7507+
ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
74917508
}
74927509
spin_unlock_irq(&ctx->completion_lock);
74937510

include/uapi/linux/io_uring.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -197,6 +197,7 @@ struct io_sqring_offsets {
197197
* sq_ring->flags
198198
*/
199199
#define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */
200+
#define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */
200201

201202
struct io_cqring_offsets {
202203
__u32 head;

0 commit comments

Comments (0)