@@ -371,7 +371,6 @@ struct io_async_rw {
 };
 
 struct io_async_ctx {
-	struct io_uring_sqe		sqe;
 	union {
 		struct io_async_rw	rw;
 		struct io_async_msghdr	msg;
@@ -433,7 +432,6 @@ struct io_kiocb {
 #define REQ_F_INFLIGHT		16384	/* on inflight list */
 #define REQ_F_COMP_LOCKED	32768	/* completion under lock */
 #define REQ_F_HARDLINK		65536	/* doesn't sever on completion < 0 */
-#define REQ_F_PREPPED		131072	/* request already opcode prepared */
 	u64			user_data;
 	u32			result;
 	u32			sequence;
@@ -1501,6 +1499,8 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
 	unsigned ioprio;
 	int ret;
 
+	if (!sqe)
+		return 0;
 	if (!req->file)
 		return -EBADF;
 
@@ -1552,6 +1552,7 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
 	/* we own ->private, reuse it for the buffer index */
 	req->rw.kiocb.private = (void *) (unsigned long)
 					READ_ONCE(req->sqe->buf_index);
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -1773,13 +1774,7 @@ static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
 static int io_alloc_async_ctx(struct io_kiocb *req)
 {
 	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
-	if (req->io) {
-		memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe));
-		req->sqe = &req->io->sqe;
-		return 0;
-	}
-
-	return 1;
+	return req->io == NULL;
 }
 
 static void io_rw_async(struct io_wq_work **workptr)
@@ -1810,12 +1805,14 @@ static int io_read_prep(struct io_kiocb *req, struct iovec **iovec,
 {
 	ssize_t ret;
 
-	ret = io_prep_rw(req, force_nonblock);
-	if (ret)
-		return ret;
+	if (req->sqe) {
+		ret = io_prep_rw(req, force_nonblock);
+		if (ret)
+			return ret;
 
-	if (unlikely(!(req->file->f_mode & FMODE_READ)))
-		return -EBADF;
+		if (unlikely(!(req->file->f_mode & FMODE_READ)))
+			return -EBADF;
+	}
 
 	return io_import_iovec(READ, req, iovec, iter);
 }
@@ -1829,15 +1826,9 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
 	size_t iov_count;
 	ssize_t io_size, ret;
 
-	if (!req->io) {
-		ret = io_read_prep(req, &iovec, &iter, force_nonblock);
-		if (ret < 0)
-			return ret;
-	} else {
-		ret = io_import_iovec(READ, req, &iovec, &iter);
-		if (ret < 0)
-			return ret;
-	}
+	ret = io_read_prep(req, &iovec, &iter, force_nonblock);
+	if (ret < 0)
+		return ret;
 
 	/* Ensure we clear previously set non-block flag */
 	if (!force_nonblock)
@@ -1901,12 +1892,14 @@ static int io_write_prep(struct io_kiocb *req, struct iovec **iovec,
 {
 	ssize_t ret;
 
-	ret = io_prep_rw(req, force_nonblock);
-	if (ret)
-		return ret;
+	if (req->sqe) {
+		ret = io_prep_rw(req, force_nonblock);
+		if (ret)
+			return ret;
 
-	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
-		return -EBADF;
+		if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
+			return -EBADF;
+	}
 
 	return io_import_iovec(WRITE, req, iovec, iter);
 }
@@ -1920,15 +1913,9 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
 	size_t iov_count;
 	ssize_t ret, io_size;
 
-	if (!req->io) {
-		ret = io_write_prep(req, &iovec, &iter, force_nonblock);
-		if (ret < 0)
-			return ret;
-	} else {
-		ret = io_import_iovec(WRITE, req, &iovec, &iter);
-		if (ret < 0)
-			return ret;
-	}
+	ret = io_write_prep(req, &iovec, &iter, force_nonblock);
+	if (ret < 0)
+		return ret;
 
 	/* Ensure we clear previously set non-block flag */
 	if (!force_nonblock)
@@ -2013,7 +2000,7 @@ static int io_prep_fsync(struct io_kiocb *req)
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!req->sqe)
 		return 0;
 	if (!req->file)
 		return -EBADF;
@@ -2029,7 +2016,7 @@ static int io_prep_fsync(struct io_kiocb *req)
 
 	req->sync.off = READ_ONCE(sqe->off);
 	req->sync.len = READ_ONCE(sqe->len);
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2095,7 +2082,7 @@ static int io_prep_sfr(struct io_kiocb *req)
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
 		return 0;
 	if (!req->file)
 		return -EBADF;
@@ -2108,7 +2095,7 @@ static int io_prep_sfr(struct io_kiocb *req)
 	req->sync.off = READ_ONCE(sqe->off);
 	req->sync.len = READ_ONCE(sqe->len);
 	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2173,12 +2160,17 @@ static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
 #if defined(CONFIG_NET)
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_sr_msg *sr = &req->sr_msg;
+	int ret;
 
+	if (!sqe)
+		return 0;
 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	io->msg.iov = io->msg.fast_iov;
-	return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+	ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
 					&io->msg.iov);
+	req->sqe = NULL;
+	return ret;
 #else
 	return -EOPNOTSUPP;
 #endif
@@ -2253,12 +2245,18 @@ static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
 {
 #if defined(CONFIG_NET)
 	struct io_sr_msg *sr = &req->sr_msg;
+	int ret;
+
+	if (!req->sqe)
+		return 0;
 
 	sr->msg_flags = READ_ONCE(req->sqe->msg_flags);
 	sr->msg = u64_to_user_ptr(READ_ONCE(req->sqe->addr));
 	io->msg.iov = io->msg.fast_iov;
-	return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+	ret = recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
 					&io->msg.uaddr, &io->msg.iov);
+	req->sqe = NULL;
+	return ret;
 #else
 	return -EOPNOTSUPP;
 #endif
@@ -2336,7 +2334,7 @@ static int io_accept_prep(struct io_kiocb *req)
 	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_accept *accept = &req->accept;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!req->sqe)
 		return 0;
 
 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
@@ -2347,7 +2345,7 @@ static int io_accept_prep(struct io_kiocb *req)
 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 	accept->flags = READ_ONCE(sqe->accept_flags);
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 #else
 	return -EOPNOTSUPP;
@@ -2416,16 +2414,21 @@ static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io)
 {
 #if defined(CONFIG_NET)
 	const struct io_uring_sqe *sqe = req->sqe;
+	int ret;
 
+	if (!sqe)
+		return 0;
 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
 		return -EINVAL;
 
 	req->connect.addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	req->connect.addr_len = READ_ONCE(sqe->addr2);
-	return move_addr_to_kernel(req->connect.addr, req->connect.addr_len,
+	ret = move_addr_to_kernel(req->connect.addr, req->connect.addr_len,
 					&io->connect.address);
+	req->sqe = NULL;
+	return ret;
 #else
 	return -EOPNOTSUPP;
 #endif
@@ -2526,7 +2529,7 @@ static int io_poll_remove_prep(struct io_kiocb *req)
 {
 	const struct io_uring_sqe *sqe = req->sqe;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
 		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
@@ -2535,7 +2538,7 @@ static int io_poll_remove_prep(struct io_kiocb *req)
 		return -EINVAL;
 
 	req->poll.addr = READ_ONCE(sqe->addr);
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2696,7 +2699,7 @@ static int io_poll_add_prep(struct io_kiocb *req)
 	struct io_poll_iocb *poll = &req->poll;
 	u16 events;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
 		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
@@ -2705,9 +2708,9 @@ static int io_poll_add_prep(struct io_kiocb *req)
 	if (!poll->file)
 		return -EBADF;
 
-	req->flags |= REQ_F_PREPPED;
 	events = READ_ONCE(sqe->poll_events);
 	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2845,7 +2848,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req)
 {
 	const struct io_uring_sqe *sqe = req->sqe;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
 		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
@@ -2857,7 +2860,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req)
 	if (req->timeout.flags)
 		return -EINVAL;
 
-	req->flags |= REQ_F_PREPPED;
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2893,6 +2896,8 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
 	struct io_timeout_data *data;
 	unsigned flags;
 
+	if (!sqe)
+		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
@@ -2921,6 +2926,7 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
 	data->mode = HRTIMER_MODE_REL;
 
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -2933,13 +2939,9 @@ static int io_timeout(struct io_kiocb *req)
 	unsigned span = 0;
 	int ret;
 
-	if (!req->io) {
-		if (io_alloc_async_ctx(req))
-			return -ENOMEM;
-		ret = io_timeout_prep(req, req->io, false);
-		if (ret)
-			return ret;
-	}
+	ret = io_timeout_prep(req, req->io, false);
+	if (ret)
+		return ret;
 	data = &req->io->timeout;
 
 	/*
@@ -3069,16 +3071,16 @@ static int io_async_cancel_prep(struct io_kiocb *req)
 {
	const struct io_uring_sqe *sqe = req->sqe;
 
-	if (req->flags & REQ_F_PREPPED)
+	if (!sqe)
 		return 0;
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
 	    sqe->cancel_flags)
 		return -EINVAL;
 
-	req->flags |= REQ_F_PREPPED;
 	req->cancel.addr = READ_ONCE(sqe->addr);
+	req->sqe = NULL;
 	return 0;
 }
 
@@ -3213,13 +3215,9 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
 		ret = io_nop(req);
 		break;
 	case IORING_OP_READV:
-		if (unlikely(req->sqe->buf_index))
-			return -EINVAL;
 		ret = io_read(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_WRITEV:
-		if (unlikely(req->sqe->buf_index))
-			return -EINVAL;
 		ret = io_write(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_READ_FIXED: