@@ -2181,6 +2181,13 @@ static void io_init_req_drain(struct io_kiocb *req)
 	}
 }
 
+static __cold int io_init_fail_req(struct io_kiocb *req, int err)
+{
+	/* ensure per-opcode data is cleared if we fail before prep */
+	memset(&req->cmd.data, 0, sizeof(req->cmd.data));
+	return err;
+}
+
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		       const struct io_uring_sqe *sqe)
 	__must_hold(&ctx->uring_lock)
@@ -2202,29 +2209,29 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
 	if (unlikely(opcode >= IORING_OP_LAST)) {
 		req->opcode = 0;
-		return -EINVAL;
+		return io_init_fail_req(req, -EINVAL);
 	}
 	def = &io_issue_defs[opcode];
 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
 		/* enforce forwards compatibility on users */
 		if (sqe_flags & ~SQE_VALID_FLAGS)
-			return -EINVAL;
+			return io_init_fail_req(req, -EINVAL);
 		if (sqe_flags & IOSQE_BUFFER_SELECT) {
 			if (!def->buffer_select)
-				return -EOPNOTSUPP;
+				return io_init_fail_req(req, -EOPNOTSUPP);
 			req->buf_index = READ_ONCE(sqe->buf_group);
 		}
 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
 			ctx->drain_disabled = true;
 		if (sqe_flags & IOSQE_IO_DRAIN) {
 			if (ctx->drain_disabled)
-				return -EOPNOTSUPP;
+				return io_init_fail_req(req, -EOPNOTSUPP);
 			io_init_req_drain(req);
 		}
 	}
 	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
 		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
-			return -EACCES;
+			return io_init_fail_req(req, -EACCES);
 		/* knock it to the slow queue path, will be drained there */
 		if (ctx->drain_active)
 			req->flags |= REQ_F_FORCE_ASYNC;
@@ -2237,9 +2244,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	}
 
 	if (!def->ioprio && sqe->ioprio)
-		return -EINVAL;
+		return io_init_fail_req(req, -EINVAL);
 	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
+		return io_init_fail_req(req, -EINVAL);
 
 	if (def->needs_file) {
 		struct io_submit_state *state = &ctx->submit_state;
@@ -2263,12 +2270,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
 		req->creds = xa_load(&ctx->personalities, personality);
 		if (!req->creds)
-			return -EINVAL;
+			return io_init_fail_req(req, -EINVAL);
 		get_cred(req->creds);
 		ret = security_uring_override_creds(req->creds);
 		if (ret) {
 			put_cred(req->creds);
-			return ret;
+			return io_init_fail_req(req, ret);
 		}
 		req->flags |= REQ_F_CREDS;
 	}
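For context, a minimal userspace sketch of the pattern this diff applies. All names below (fake_req, fail_req, init_req, the rw union member) are invented for illustration and are not the kernel's: the idea is only that a request object carries a union of per-opcode state, requests may be recycled with stale bytes in that union, and an early-failure helper zeroes it before the error is returned so that later generic handling cannot act on leftover data.

/* Standalone illustration -- not kernel code. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct fake_req {
	int opcode;
	union {
		struct {
			void *buf;		/* per-opcode state */
			size_t len;
		} rw;
		unsigned char data[32];
	} cmd;
};

/* mirrors io_init_fail_req(): zero per-op data, pass the error through */
static int fail_req(struct fake_req *req, int err)
{
	memset(&req->cmd.data, 0, sizeof(req->cmd.data));
	return err;
}

static int init_req(struct fake_req *req, int opcode)
{
	if (opcode < 0)			/* fail before per-opcode prep runs */
		return fail_req(req, -EINVAL);
	req->opcode = opcode;
	return 0;
}

int main(void)
{
	struct fake_req req;

	/* simulate a recycled request carrying stale per-opcode bytes */
	memset(&req, 0xaa, sizeof(req));

	int ret = init_req(&req, -1);
	printf("init_req: %d, rw.buf after failure: %p\n",
	       ret, req.cmd.rw.buf);	/* NULL: stale pointer was cleared */
	return 0;
}

The design point, as the in-diff comment states, is that a request can fail in io_init_req() before any opcode-specific prep has initialized req->cmd.data; funneling every early-failure return through one helper guarantees the per-opcode area is cleared on all of those paths at once.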