@@ -524,6 +524,7 @@ enum {
 	REQ_F_OVERFLOW_BIT,
 	REQ_F_POLLED_BIT,
 	REQ_F_BUFFER_SELECTED_BIT,
+	REQ_F_NO_FILE_TABLE_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -577,6 +578,8 @@ enum {
 	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
 	/* buffer already selected */
 	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
+	/* doesn't need file table for this request */
+	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
 };
 
 struct async_poll {
@@ -799,6 +802,7 @@ static const struct io_op_def io_op_defs[] = {
 		.needs_file		= 1,
 		.fd_non_neg		= 1,
 		.needs_fs		= 1,
+		.file_table		= 1,
 	},
 	[IORING_OP_READ] = {
 		.needs_mm		= 1,
@@ -1291,7 +1295,7 @@ static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = ctx->fallback_req;
-	if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
+	if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
 		return req;
 
 	return NULL;
@@ -1378,7 +1382,7 @@ static void __io_free_req(struct io_kiocb *req)
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
 	else
-		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+		clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
 }
 
 struct req_batch {
@@ -2034,7 +2038,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
  * any file. For now, just ensure that anything potentially problematic is done
  * inline.
  */
-static bool io_file_supports_async(struct file *file)
+static bool io_file_supports_async(struct file *file, int rw)
 {
 	umode_t mode = file_inode(file)->i_mode;
 
@@ -2043,7 +2047,13 @@ static bool io_file_supports_async(struct file *file)
 	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
 		return true;
 
-	return false;
+	if (!(file->f_mode & FMODE_NOWAIT))
+		return false;
+
+	if (rw == READ)
+		return file->f_op->read_iter != NULL;
+
+	return file->f_op->write_iter != NULL;
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
@@ -2571,7 +2581,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
 	 * we know to async punt it even if it was opened O_NONBLOCK
 	 */
-	if (force_nonblock && !io_file_supports_async(req->file))
+	if (force_nonblock && !io_file_supports_async(req->file, READ))
 		goto copy_iov;
 
 	iov_count = iov_iter_count(&iter);
@@ -2594,7 +2604,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 		if (ret)
 			goto out_free;
 		/* any defer here is final, must blocking retry */
-		if (!(req->flags & REQ_F_NOWAIT))
+		if (!(req->flags & REQ_F_NOWAIT) &&
+		    !file_can_poll(req->file))
 			req->flags |= REQ_F_MUST_PUNT;
 		return -EAGAIN;
 	}
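
Note (not part of the patch): taken together, the read-path hunks above change when an -EAGAIN is forced out to an io-wq worker. io_file_supports_async() now answers per direction, requiring FMODE_NOWAIT plus the matching ->read_iter or ->write_iter, and io_read() no longer sets REQ_F_MUST_PUNT for pollable files, so the request can be retried once poll reports readiness instead of occupying a worker thread. The sketch below is illustrative only and assumes liburing is installed; it queues a read on a not-yet-readable socket, the case that benefits from the poll-driven retry. From userspace the visible result is the same either way.

/* Illustration only, not from this commit: read queued before data exists. */
#include <liburing.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2];
	char buf[32];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0 ||
	    io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	/* submit the read while the socket has no data yet */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	/* make the socket readable, then reap the completion */
	write(fds[1], "ping", 4);
	io_uring_wait_cqe(&ring, &cqe);
	printf("read completed with res=%d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}
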
@@ -2662,7 +2673,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
 	 * we know to async punt it even if it was opened O_NONBLOCK
 	 */
-	if (force_nonblock && !io_file_supports_async(req->file))
+	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
 		goto copy_iov;
 
 	/* file path doesn't support NOWAIT for non-direct_IO */
@@ -2716,7 +2727,8 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 		if (ret)
 			goto out_free;
 		/* any defer here is final, must blocking retry */
-		req->flags |= REQ_F_MUST_PUNT;
+		if (!file_can_poll(req->file))
+			req->flags |= REQ_F_MUST_PUNT;
 		return -EAGAIN;
 	}
 }
@@ -2756,15 +2768,6 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static bool io_splice_punt(struct file *file)
-{
-	if (get_pipe_info(file))
-		return false;
-	if (!io_file_supports_async(file))
-		return true;
-	return !(file->f_flags & O_NONBLOCK);
-}
-
 static int io_splice(struct io_kiocb *req, bool force_nonblock)
 {
 	struct io_splice *sp = &req->splice;
@@ -2774,11 +2777,8 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
 	loff_t *poff_in, *poff_out;
 	long ret;
 
-	if (force_nonblock) {
-		if (io_splice_punt(in) || io_splice_punt(out))
-			return -EAGAIN;
-		flags |= SPLICE_F_NONBLOCK;
-	}
+	if (force_nonblock)
+		return -EAGAIN;
 
 	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
 	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
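
Note (not part of the patch): with io_splice_punt() removed, an IORING_OP_SPLICE attempted with force_nonblock now always defers, so the splice itself runs from io-wq worker context where it may block; nothing changes in the userspace API. A minimal sketch of such a submission, assuming liburing's io_uring_prep_splice() helper and an arbitrary readable path:

/* Illustration only: splice from a file (offset 0) into a pipe (off_out = -1). */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int pipefd[2];
	int fd;

	fd = open("/etc/hostname", O_RDONLY);
	if (fd < 0 || pipe(pipefd) < 0 || io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	/* move up to 4096 bytes from the file into the pipe */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_splice(sqe, fd, 0, pipefd[1], -1, 4096, 0);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	printf("splice res=%d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
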
@@ -3355,8 +3355,12 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock)
 	struct kstat stat;
 	int ret;
 
-	if (force_nonblock)
+	if (force_nonblock) {
+		/* only need file table for an actual valid fd */
+		if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
+			req->flags |= REQ_F_NO_FILE_TABLE;
 		return -EAGAIN;
+	}
 
 	if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
 		return -EINVAL;
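
Note (not part of the patch): the hunk above sets the new REQ_F_NO_FILE_TABLE flag when the statx resolves purely by path (dfd of -1 or AT_FDCWD) and is about to be punted, which lets the later io_grab_files() change skip grabbing the submitter's file table for it. A minimal userspace sketch of exactly that case, assuming liburing's io_uring_prep_statx() helper and glibc 2.28+ for struct statx:

/* Illustration only: path-relative IORING_OP_STATX, no real directory fd. */
#define _GNU_SOURCE
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct statx stx;

	if (io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	/* dfd == AT_FDCWD: lookup by path only, no file table entry needed */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_statx(sqe, AT_FDCWD, "/etc/hostname", 0, STATX_SIZE, &stx);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	if (cqe->res == 0)
		printf("size=%llu\n", (unsigned long long)stx.stx_size);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
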
@@ -3502,7 +3506,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
 	if (io_req_cancelled(req))
 		return;
 	__io_sync_file_range(req);
-	io_put_req(req); /* put submission ref */
+	io_steal_work(req, workptr);
 }
 
 static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
@@ -5015,7 +5019,7 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	int ret;
 
 	/* Still need defer if there is pending req in defer list. */
-	if (!req_need_defer(req) && list_empty(&ctx->defer_list))
+	if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
 		return 0;
 
 	if (!req->io && io_alloc_async_ctx(req))
@@ -5429,7 +5433,7 @@ static int io_grab_files(struct io_kiocb *req)
 	int ret = -EBADF;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (req->work.files)
+	if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
 		return 0;
 	if (!ctx->ring_file)
 		return -EBADF;
@@ -7327,7 +7331,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	 * it could cause shutdown to hang.
 	 */
 	while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
-		cpu_relax();
+		cond_resched();
 
 	io_kill_timeouts(ctx);
 	io_poll_remove_all(ctx);