@@ -619,6 +619,8 @@ struct io_kiocb {
 	bool			needs_fixed_file;
 	u8			opcode;
 
+	u16			buf_index;
+
 	struct io_ring_ctx	*ctx;
 	struct list_head	list;
 	unsigned int		flags;
@@ -924,6 +926,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 		goto err;
 
 	ctx->flags = p->flags;
+	init_waitqueue_head(&ctx->sqo_wait);
 	init_waitqueue_head(&ctx->cq_wait);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	init_completion(&ctx->completions[0]);
@@ -2100,9 +2103,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	req->rw.addr = READ_ONCE(sqe->addr);
 	req->rw.len = READ_ONCE(sqe->len);
-	/* we own ->private, reuse it for the buffer index / buffer ID */
-	req->rw.kiocb.private = (void *) (unsigned long)
-					READ_ONCE(sqe->buf_index);
+	req->buf_index = READ_ONCE(sqe->buf_index);
 	return 0;
 }
 
@@ -2145,15 +2146,15 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 	struct io_ring_ctx *ctx = req->ctx;
 	size_t len = req->rw.len;
 	struct io_mapped_ubuf *imu;
-	unsigned index, buf_index;
+	u16 index, buf_index;
 	size_t offset;
 	u64 buf_addr;
 
 	/* attempt to use fixed buffers without having provided iovecs */
 	if (unlikely(!ctx->user_bufs))
 		return -EFAULT;
 
-	buf_index = (unsigned long) req->rw.kiocb.private;
+	buf_index = req->buf_index;
 	if (unlikely(buf_index >= ctx->nr_user_bufs))
 		return -EFAULT;
 
@@ -2269,10 +2270,10 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
 					  bool needs_lock)
 {
 	struct io_buffer *kbuf;
-	int bgid;
+	u16 bgid;
 
 	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
-	bgid = (int) (unsigned long) req->rw.kiocb.private;
+	bgid = req->buf_index;
 	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
 	if (IS_ERR(kbuf))
 		return kbuf;
@@ -2363,7 +2364,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	}
 
 	/* buffer index only valid with fixed read/write, or buffer select */
-	if (req->rw.kiocb.private && !(req->flags & REQ_F_BUFFER_SELECT))
+	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
 		return -EINVAL;
 
 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
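
The hunks above are all one change: the 16-bit buffer index / buffer group ID from the SQE now gets its own io_kiocb field (buf_index) instead of being smuggled through kiocb->private, and the readers (io_import_fixed(), io_rw_buffer_select(), io_import_iovec()) switch over to it. As a rough userspace sketch of the resulting pattern, with invented names (fake_req, lookup_fixed_buf) that are not part of io_uring:

/* Illustrative userspace sketch only -- not the kernel code. It mimics
 * carrying a 16-bit buffer index on a request and bounds-checking it
 * against a table of registered buffers before use, as io_import_fixed()
 * does above. All names here are made up for the example. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_mapped_buf {
	void	*base;
	size_t	len;
};

struct fake_req {
	uint16_t buf_index;	/* analogous to io_kiocb::buf_index */
};

static struct fake_mapped_buf *lookup_fixed_buf(const struct fake_req *req,
						struct fake_mapped_buf *bufs,
						unsigned int nr_bufs)
{
	/* reject out-of-range indices before touching the table */
	if (req->buf_index >= nr_bufs)
		return NULL;
	return &bufs[req->buf_index];
}

int main(void)
{
	static char backing[64];
	struct fake_mapped_buf table[] = { { backing, sizeof(backing) } };
	struct fake_req req = { .buf_index = 0 };
	struct fake_mapped_buf *b = lookup_fixed_buf(&req, table, 1);

	printf("buffer %u -> %s (%zu bytes)\n", (unsigned int)req.buf_index,
	       b ? "ok" : "rejected", b ? b->len : (size_t)0);
	return 0;
}

The u16 type matches the width of buf_index in the SQE, so narrowing index and bgid from unsigned/int loses nothing.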
@@ -2771,11 +2772,8 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
 	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
 	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
 
-	if (sp->len) {
+	if (sp->len)
 		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
-		if (force_nonblock && ret == -EAGAIN)
-			return -EAGAIN;
-	}
 
 	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
 	req->flags &= ~REQ_F_NEED_CLEANUP;
@@ -4137,12 +4135,14 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	req->result = mask;
 	init_task_work(&req->task_work, func);
 	/*
-	 * If this fails, then the task is exiting. Punt to one of the io-wq
-	 * threads to ensure the work gets run, we can't always rely on exit
-	 * cancelation taking care of this.
+	 * If this fails, then the task is exiting. When a task exits, the
+	 * work gets canceled, so just cancel this request as well instead
+	 * of executing it. We can't safely execute it anyway, as we may not
+	 * have the state needed for it.
 	 */
 	ret = task_work_add(tsk, &req->task_work, true);
 	if (unlikely(ret)) {
+		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
 		task_work_add(tsk, &req->task_work, true);
 	}
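
The hunk above changes the task_work_add() failure path: if queueing to the original task fails because that task is exiting, the poll is first marked canceled and only then punted to io-wq, so the fallback worker cancels the request instead of trying to execute it without the task's state. A rough userspace sketch of that ordering, using invented names and a plain C11 atomic standing in for poll->canceled:

/* Illustrative sketch only -- invented names, not the io_uring code.
 * The point is the ordering shown above: when queueing work to the
 * original task fails, publish the "canceled" flag before punting the
 * work to a fallback worker, so the worker tears the request down
 * instead of executing it. */
#include <stdatomic.h>
#include <stdio.h>

struct fake_poll {
	atomic_bool canceled;
};

static void fallback_worker(struct fake_poll *poll)
{
	if (atomic_load(&poll->canceled))
		puts("canceled: complete the request with an error, don't execute");
	else
		puts("execute the poll work");
}

int main(void)
{
	struct fake_poll poll = { .canceled = false };

	/* queueing to the exiting task "failed": mark canceled first... */
	atomic_store(&poll.canceled, true);	/* analogous to WRITE_ONCE(poll->canceled, true) */
	/* ...then hand the work to the fallback path, which sees the flag */
	fallback_worker(&poll);
	return 0;
}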
@@ -5013,12 +5013,13 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
 		return 0;
 
-	if (!req->io && io_alloc_async_ctx(req))
-		return -EAGAIN;
-
-	ret = io_req_defer_prep(req, sqe);
-	if (ret < 0)
-		return ret;
+	if (!req->io) {
+		if (io_alloc_async_ctx(req))
+			return -EAGAIN;
+		ret = io_req_defer_prep(req, sqe);
+		if (ret < 0)
+			return ret;
+	}
 
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
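
The restructuring above makes io_req_defer() allocate the async context and run io_req_defer_prep() only when req->io is not already set, so a request that was prepared earlier is not prepared a second time; the REQ_F_FORCE_ASYNC path in io_queue_sqe() further down gets the same guard. A minimal sketch of the "prepare at most once" pattern, with invented names, not the kernel code:

/* Minimal sketch of the "allocate and prepare only once" guard, using
 * invented names -- not the kernel implementation. */
#include <stdio.h>
#include <stdlib.h>

struct async_ctx { int prepared; };
struct fake_req  { struct async_ctx *io; };

static int defer_prep(struct fake_req *req)
{
	if (!req->io) {				/* only if never prepared */
		req->io = calloc(1, sizeof(*req->io));
		if (!req->io)
			return -1;		/* analogous to -EAGAIN */
		req->io->prepared = 1;		/* one-time preparation work */
	}
	return 0;				/* already prepared: pass through */
}

int main(void)
{
	struct fake_req req = { 0 };

	defer_prep(&req);
	defer_prep(&req);	/* second call is a no-op, nothing is redone */
	printf("prepared=%d\n", req.io ? req.io->prepared : 0);
	free(req.io);
	return 0;
}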
@@ -5305,7 +5306,8 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (ret)
 		return ret;
 
-	if (ctx->flags & IORING_SETUP_IOPOLL) {
+	/* If the op doesn't have a file, we're not polling for it */
+	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
 		const bool in_async = io_wq_current_is_worker();
 
 		if (req->result == -EAGAIN)
@@ -5606,9 +5608,15 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 			io_double_put_req(req);
 		}
 	} else if (req->flags & REQ_F_FORCE_ASYNC) {
-		ret = io_req_defer_prep(req, sqe);
-		if (unlikely(ret < 0))
-			goto fail_req;
+		if (!req->io) {
+			ret = -EAGAIN;
+			if (io_alloc_async_ctx(req))
+				goto fail_req;
+			ret = io_req_defer_prep(req, sqe);
+			if (unlikely(ret < 0))
+				goto fail_req;
+		}
+
 		/*
 		 * Never try inline submit if IOSQE_ASYNC is set, go straight
 		 * to async execution.
@@ -6024,6 +6032,7 @@ static int io_sq_thread(void *data)
 				finish_wait(&ctx->sqo_wait, &wait);
 
 				ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+				ret = 0;
 				continue;
 			}
 			finish_wait(&ctx->sqo_wait, &wait);
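
The added ret = 0 clears the submit result from the previous pass before the SQ thread loops again, presumably so a stale -EBUSY does not keep steering the next iteration back into the wait path once there is work to submit. A tiny standalone sketch of that hazard, with invented names, not the kernel loop:

/* Tiny sketch of the stale-error hazard that resetting ret avoids,
 * with invented names -- not the kernel code. */
#include <stdio.h>

#define FAKE_EBUSY 16

int main(void)
{
	int ret = FAKE_EBUSY;	/* error left over from a previous pass */
	int pending = 1;	/* new work arrived while we slept */

	for (int pass = 0; pass < 2; pass++) {
		if (!pending || ret == FAKE_EBUSY) {
			/* idle path taken even though work is pending */
			printf("pass %d: waiting\n", pass);
			ret = 0;	/* the fix: drop the stale error */
			continue;
		}
		printf("pass %d: submitting\n", pass);
	}
	return 0;
}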
6029
6038
finish_wait (& ctx -> sqo_wait , & wait );
@@ -6837,7 +6846,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
6837
6846
{
6838
6847
int ret ;
6839
6848
6840
- init_waitqueue_head (& ctx -> sqo_wait );
6841
6849
mmgrab (current -> mm );
6842
6850
ctx -> sqo_mm = current -> mm ;
6843
6851