@@ -83,18 +83,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	/* used for fixed read/write too - just read unconditionally */
 	req->buf_index = READ_ONCE(sqe->buf_index);
 
-	if (req->opcode == IORING_OP_READ_FIXED ||
-	    req->opcode == IORING_OP_WRITE_FIXED) {
-		struct io_ring_ctx *ctx = req->ctx;
-		u16 index;
-
-		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
-			return -EFAULT;
-		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
-		req->imu = ctx->user_bufs[index];
-		io_req_set_rsrc_node(req, ctx, 0);
-	}
-
 	ioprio = READ_ONCE(sqe->ioprio);
 	if (ioprio) {
 		ret = ioprio_check_cap(ioprio);
@@ -110,16 +98,42 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	rw->addr = READ_ONCE(sqe->addr);
 	rw->len = READ_ONCE(sqe->len);
 	rw->flags = READ_ONCE(sqe->rw_flags);
+	return 0;
+}
+
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	int ret;
+
+	ret = io_prep_rw(req, sqe);
+	if (unlikely(ret))
+		return ret;
 
-	/* Have to do this validation here, as this is in io_read() rw->len might
-	 * have chanaged due to buffer selection
+	/*
+	 * Have to do this validation here, as this is in io_read() rw->len
+	 * might have chanaged due to buffer selection
 	 */
-	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
-		ret = io_iov_buffer_select_prep(req);
-		if (ret)
-			return ret;
-	}
+	if (req->flags & REQ_F_BUFFER_SELECT)
+		return io_iov_buffer_select_prep(req);
+
+	return 0;
+}
 
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	u16 index;
+	int ret;
+
+	ret = io_prep_rw(req, sqe);
+	if (unlikely(ret))
+		return ret;
+
+	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+		return -EFAULT;
+	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+	req->imu = ctx->user_bufs[index];
+	io_req_set_rsrc_node(req, ctx, 0);
 	return 0;
 }
 
@@ -129,12 +143,20 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  */
 int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	int ret;
 
+	/* must be used with provided buffers */
+	if (!(req->flags & REQ_F_BUFFER_SELECT))
+		return -EINVAL;
+
 	ret = io_prep_rw(req, sqe);
 	if (unlikely(ret))
 		return ret;
 
+	if (rw->addr || rw->len)
+		return -EINVAL;
+
 	req->flags |= REQ_F_APOLL_MULTISHOT;
 	return 0;
 }
@@ -542,6 +564,9 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 {
 	if (!force && !io_cold_defs[req->opcode].prep_async)
 		return 0;
+	/* opcode type doesn't need async data */
+	if (!io_cold_defs[req->opcode].async_size)
+		return 0;
 	if (!req_has_async_data(req)) {
 		struct io_async_rw *iorw;
 
@@ -887,6 +912,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 {
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	unsigned int cflags = 0;
 	int ret;
 
@@ -903,7 +929,12 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 	 * handling arm it.
 	 */
 	if (ret == -EAGAIN) {
-		io_kbuf_recycle(req, issue_flags);
+		/*
+		 * Reset rw->len to 0 again to avoid clamping future mshot
+		 * reads, in case the buffer size varies.
+		 */
+		if (io_kbuf_recycle(req, issue_flags))
+			rw->len = 0;
 		return -EAGAIN;
 	}
 
@@ -916,6 +947,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 	 * jump to the termination path. This request is then done.
 	 */
 	cflags = io_put_kbuf(req, issue_flags);
+	rw->len = 0; /* similarly to above, reset len to 0 */
 
 	if (io_fill_cqe_req_aux(req,
 				issue_flags & IO_URING_F_COMPLETE_DEFER,
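
For context on the multishot path touched above: a request that passes io_read_mshot_prep() must use buffer selection and must not carry its own address or length, so userspace supplies data through a provided-buffer ring and leaves sqe->addr/sqe->len at zero. The sketch below shows how that might look from userspace with liburing; it is not part of this commit, and the helper names and signatures used (io_uring_setup_buf_ring, io_uring_buf_ring_add/advance, io_uring_prep_read_multishot) are assumptions about the liburing API that may differ between versions.

/*
 * Hedged userspace sketch, not part of this diff. Caller is assumed to have
 * initialized the ring with io_uring_queue_init(); buffers are leaked for
 * brevity.
 */
#include <liburing.h>
#include <stdlib.h>

#define BGID     7	/* arbitrary buffer group id for this example */
#define NBUFS    8
#define BUF_SIZE 4096

static int queue_mshot_read(struct io_uring *ring, int fd)
{
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	int i, err;

	/* register a ring of provided buffers under group BGID */
	br = io_uring_setup_buf_ring(ring, NBUFS, BGID, 0, &err);
	if (!br)
		return err;
	for (i = 0; i < NBUFS; i++)
		io_uring_buf_ring_add(br, malloc(BUF_SIZE), BUF_SIZE, i,
				      io_uring_buf_ring_mask(NBUFS), i);
	io_uring_buf_ring_advance(br, NBUFS);

	/*
	 * Mirrors the io_read_mshot_prep() checks above: buffer selection is
	 * required and addr/len stay zero, so the kernel picks a buffer from
	 * group BGID for each completion this request posts.
	 */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read_multishot(sqe, fd, 0, 0, BGID);
	return io_uring_submit(ring);
}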