@@ -502,6 +502,7 @@ struct io_poll_update {
 struct io_close {
 	struct file			*file;
 	int				fd;
+	u32				file_slot;
 };
 
 struct io_timeout_data {
@@ -1098,6 +1099,8 @@ static int io_req_prep_async(struct io_kiocb *req);
 
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
				 unsigned int issue_flags, u32 slot_index);
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
+
 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
 
 static struct kmem_cache *req_cachep;
@@ -3605,7 +3608,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 		iov_iter_save_state(iter, state);
 	}
 	req->result = iov_iter_count(iter);
-	ret2 = 0;
 
 	/* Ensure we clear previously set non-block flag */
 	if (!force_nonblock)
@@ -3670,8 +3672,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	} else {
 copy_iov:
 		iov_iter_restore(iter, state);
-		if (ret2 > 0)
-			iov_iter_advance(iter, ret2);
 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
 		return ret ?: -EAGAIN;
 	}
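
These two io_write() hunks drop the manual re-advance of the restored iterator by ret2 (and the ret2 priming that supported it); the retry path now hands io_setup_async_rw() the iterator exactly as restored by iov_iter_restore(). For context, a sketch of the save/restore helper interfaces as assumed from include/linux/uio.h in this series (treat the exact shapes as assumptions):

	struct iov_iter_state {
		size_t		iov_offset;
		size_t		count;
		unsigned long	nr_segs;
	};

	/* snapshot the iterator before attempting the I/O */
	static inline void iov_iter_save_state(struct iov_iter *iter,
					       struct iov_iter_state *state)
	{
		state->iov_offset = iter->iov_offset;
		state->count = iter->count;
		state->nr_segs = iter->nr_segs;
	}

	/* rewind the iterator to the snapshot after a short or failed attempt */
	void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
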
@@ -4387,7 +4387,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
 	int i, bid = pbuf->bid;
 
 	for (i = 0; i < pbuf->nbufs; i++) {
-		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+		buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
 		if (!buf)
 			break;
 
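
The switch to GFP_KERNEL_ACCOUNT charges these allocations to the submitting task's memory cgroup, which matters because nbufs is user-controlled and one struct io_buffer is allocated per provided buffer. For orientation, a minimal userspace sketch of the request that reaches this loop (raw SQE setup; the field mapping mirrors io_provide_buffers_prep(), the helper name is hypothetical):

	#include <string.h>
	#include <linux/io_uring.h>

	/* hand `nbufs` buffers of `len` bytes each, starting at `addr`, to the
	 * kernel under buffer group `bgid`, with buffer IDs starting at `bid` */
	static void prep_provide_buffers(struct io_uring_sqe *sqe, void *addr,
					 int len, int nbufs, int bgid, int bid)
	{
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_PROVIDE_BUFFERS;
		sqe->fd = nbufs;			/* number of buffers */
		sqe->addr = (unsigned long)addr;	/* base of the buffer area */
		sqe->len = len;				/* size of each buffer */
		sqe->buf_group = bgid;			/* buffer group ID */
		sqe->off = bid;				/* first buffer ID */
	}
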
@@ -4594,12 +4594,16 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
-	    sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+	    sqe->rw_flags || sqe->buf_index)
 		return -EINVAL;
 	if (req->flags & REQ_F_FIXED_FILE)
 		return -EBADF;
 
 	req->close.fd = READ_ONCE(sqe->fd);
+	req->close.file_slot = READ_ONCE(sqe->file_index);
+	if (req->close.file_slot && req->close.fd)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -4611,6 +4615,11 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
 	struct file *file = NULL;
 	int ret = -EBADF;
 
+	if (req->close.file_slot) {
+		ret = io_close_fixed(req, issue_flags);
+		goto err;
+	}
+
 	spin_lock(&files->file_lock);
 	fdt = files_fdtable(files);
 	if (close->fd >= fdt->max_fds) {
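
Together with the prep change above, this lets IORING_OP_CLOSE release a fixed-file slot rather than a regular descriptor: userspace encodes the slot in sqe->file_index as slot + 1 (zero preserves the old close-an-fd behavior), and fd must stay zero since the prep code rejects requests that set both. A minimal userspace sketch (raw SQE setup; the helper name is hypothetical, and it assumes the uapi struct io_uring_sqe from this series, where file_index shares the union with splice_fd_in):

	#include <string.h>
	#include <linux/io_uring.h>

	/* close the 0-based fixed-file table slot `slot` */
	static void prep_close_direct(struct io_uring_sqe *sqe, unsigned int slot)
	{
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_CLOSE;
		sqe->fd = 0;			/* must be 0: fd and file_index are exclusive */
		sqe->file_index = slot + 1;	/* 1-based; 0 means "close a normal fd" */
	}
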
@@ -5338,7 +5347,7 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
 	if (req->poll.events & EPOLLONESHOT)
 		flags = 0;
 	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-		req->poll.done = true;
+		req->poll.events |= EPOLLONESHOT;
 		flags = 0;
 	}
 	if (flags & IORING_CQE_F_MORE)
@@ -5367,10 +5376,15 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 	} else {
 		bool done;
 
+		if (req->poll.done) {
+			spin_unlock(&ctx->completion_lock);
+			return;
+		}
 		done = __io_poll_complete(req, req->result);
 		if (done) {
 			io_poll_remove_double(req);
 			hash_del(&req->hash_node);
+			req->poll.done = true;
 		} else {
 			req->result = 0;
 			add_wait_queue(req->poll.head, &req->poll.wait);
@@ -5508,6 +5522,7 @@ static void io_async_task_func(struct io_kiocb *req, bool *locked)
 
 	hash_del(&req->hash_node);
 	io_poll_remove_double(req);
+	apoll->poll.done = true;
 	spin_unlock(&ctx->completion_lock);
 
 	if (!READ_ONCE(apoll->poll.canceled))
@@ -5828,6 +5843,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_poll_table ipt;
 	__poll_t mask;
+	bool done;
 
 	ipt.pt._qproc = io_poll_queue_proc;
 
@@ -5836,13 +5852,13 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (mask) { /* no async, we'd stolen it */
 		ipt.error = 0;
-		io_poll_complete(req, mask);
+		done = io_poll_complete(req, mask);
 	}
 	spin_unlock(&ctx->completion_lock);
 
 	if (mask) {
 		io_cqring_ev_posted(ctx);
-		if (poll->events & EPOLLONESHOT)
+		if (done)
 			io_put_req(req);
 	}
 	return ipt.error;
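
Switching the put decision from poll->events to the return value is needed because of the __io_poll_complete() hunk above: when the CQ ring is full, the request is now forced into one-shot mode by OR-ing in EPOLLONESHOT after arming, so the events mask sampled here no longer proves the request completed. The assumed shape of the callee, for context (a sketch; io_poll_complete() wraps __io_poll_complete() under ->completion_lock):

	/* complete the poll request and report whether it is finished,
	 * i.e. whether the caller should drop its request reference */
	static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
	{
		bool done = __io_poll_complete(req, mask);

		io_commit_cqring(req->ctx);
		return done;
	}
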
@@ -6333,19 +6349,16 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_uring_rsrc_update2 up;
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
-
 	up.offset = req->rsrc_update.offset;
 	up.data = req->rsrc_update.arg;
 	up.nr = 0;
 	up.tags = 0;
 	up.resv = 0;
 
-	mutex_lock(&ctx->uring_lock);
+	io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
 	ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
					&up, req->rsrc_update.nr_args);
-	mutex_unlock(&ctx->uring_lock);
+	io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
 
 	if (ret < 0)
 		req_set_fail(req);
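
Replacing the unconditional mutex_lock() with io_ring_submit_lock() is what makes the -EAGAIN bail-out above removable: inline (non-blocking) submission already holds ctx->uring_lock, so the helper only takes the mutex when the request is issued from an async worker. A sketch of the helpers as assumed earlier in fs/io_uring.c:

	static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
	{
		/* inline submission and SQPOLL already hold ctx->uring_lock */
		if (needs_lock)
			mutex_lock(&ctx->uring_lock);
	}

	static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
	{
		if (needs_lock)
			mutex_unlock(&ctx->uring_lock);
	}
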
@@ -8400,6 +8413,44 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 	return ret;
 }
 
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
+{
+	unsigned int offset = req->close.file_slot - 1;
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_fixed_file *file_slot;
+	struct file *file;
+	int ret, i;
+
+	io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+	ret = -ENXIO;
+	if (unlikely(!ctx->file_data))
+		goto out;
+	ret = -EINVAL;
+	if (offset >= ctx->nr_user_files)
+		goto out;
+	ret = io_rsrc_node_switch_start(ctx);
+	if (ret)
+		goto out;
+
+	i = array_index_nospec(offset, ctx->nr_user_files);
+	file_slot = io_fixed_file_slot(&ctx->file_table, i);
+	ret = -EBADF;
+	if (!file_slot->file_ptr)
+		goto out;
+
+	file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+	ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
+	if (ret)
+		goto out;
+
+	file_slot->file_ptr = 0;
+	io_rsrc_node_switch(ctx, ctx->file_data);
+	ret = 0;
+out:
+	io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+	return ret;
+}
+
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
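
Two details of io_close_fixed() worth noting: the file is not fput() directly, because io_queue_rsrc_removal() defers the final put until every request still using the old rsrc node has drained; and FFS_MASK strips the per-file feature bits that io_uring packs into the low bits of file_slot->file_ptr (the struct file pointer is aligned, so those bits are free). The assumed bit layout, a sketch of the definitions earlier in fs/io_uring.c (the exact flag values are an assumption):

	#define FFS_ASYNC_READ		0x1UL	/* file supports nowait reads */
	#define FFS_ASYNC_WRITE		0x2UL	/* file supports nowait writes */
	#define FFS_ISREG		0x4UL	/* file is a regular file */
	#define FFS_MASK		~(FFS_ASYNC_READ | FFS_ASYNC_WRITE | FFS_ISREG)
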
@@ -9166,8 +9217,10 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
 	struct io_buffer *buf;
 	unsigned long index;
 
-	xa_for_each(&ctx->io_buffers, index, buf)
+	xa_for_each(&ctx->io_buffers, index, buf) {
 		__io_remove_buffers(ctx, buf, index, -1U);
+		cond_resched();
+	}
 }
 
 static void io_req_cache_free(struct list_head *list)
@@ -9665,8 +9718,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
 	struct io_tctx_node *node;
 	unsigned long index;
 
-	xa_for_each(&tctx->xa, index, node)
+	xa_for_each(&tctx->xa, index, node) {
 		io_uring_del_tctx_node(index);
+		cond_resched();
+	}
 	if (wq) {
 		/*
		 * Must be after io_uring_del_task_file() (removes nodes under