@@ -1294,6 +1294,17 @@ static void io_queue_async_work(struct io_kiocb *req)
 
 	/* init ->work of the whole link before punting */
 	io_prep_async_link(req);
+
+	/*
+	 * Not expected to happen, but if we do have a bug where this _can_
+	 * happen, catch it here and ensure the request is marked as
+	 * canceled. That will make io-wq go through the usual work cancel
+	 * procedure rather than attempt to run this request (or create a new
+	 * worker for it).
+	 */
+	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
+		req->work.flags |= IO_WQ_WORK_CANCEL;
+
 	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
 					&req->work, req->flags);
 	io_wq_enqueue(tctx->io_wq, &req->work);
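
The guard above deliberately does not fail the submission on the spot: it only tags the work item, so the io-wq side can take its usual cancellation path instead of executing the request (or spawning a worker for it). As a rough standalone model of that split, with every name invented rather than taken from the kernel:

    /* work_cancel_sketch.c -- illustrative only; all names here are invented.
     * Models "flag the item as cancelled at queue time, let the worker take
     * its existing cancel path instead of running the request". */
    #include <stdbool.h>
    #include <stdio.h>

    #define WORK_CANCEL (1u << 0)

    struct work_item {
        unsigned int flags;
        const char *name;
    };

    /* queue side: an unexpected submitter context only tags the item */
    static void queue_work(struct work_item *w, bool same_thread_group)
    {
        if (!same_thread_group)
            w->flags |= WORK_CANCEL;
        /* ...enqueue to the worker as usual... */
    }

    /* worker side: cancelled items go through the cancel path, not execution */
    static void worker_handle(struct work_item *w)
    {
        if (w->flags & WORK_CANCEL)
            printf("%s: cancelled, not executed\n", w->name);
        else
            printf("%s: executed\n", w->name);
    }

    int main(void)
    {
        struct work_item ok = { .name = "ok" }, bad = { .name = "bad" };

        queue_work(&ok, true);
        queue_work(&bad, false);
        worker_handle(&ok);
        worker_handle(&bad);
        return 0;
    }

The benefit of this shape is that the unexpected condition reuses cancellation machinery that already exists, instead of adding a new failure path at queue time.
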
@@ -2205,7 +2216,7 @@ static inline bool io_run_task_work(void)
  * Find and free completed poll iocbs
  */
 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
-			       struct list_head *done)
+			       struct list_head *done, bool resubmit)
 {
 	struct req_batch rb;
 	struct io_kiocb *req;
@@ -2220,7 +2231,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		req = list_first_entry(done, struct io_kiocb, inflight_entry);
 		list_del(&req->inflight_entry);
 
-		if (READ_ONCE(req->result) == -EAGAIN &&
+		if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
 		    !(req->flags & REQ_F_DONT_REISSUE)) {
 			req->iopoll_completed = 0;
 			req_ref_get(req);
@@ -2244,7 +2255,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
-			long min)
+			long min, bool resubmit)
 {
 	struct io_kiocb *req, *tmp;
 	LIST_HEAD(done);
@@ -2287,7 +2298,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	}
 
 	if (!list_empty(&done))
-		io_iopoll_complete(ctx, nr_events, &done);
+		io_iopoll_complete(ctx, nr_events, &done, resubmit);
 
 	return ret;
 }
@@ -2305,7 +2316,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
 	while (!list_empty(&ctx->iopoll_list)) {
 		unsigned int nr_events = 0;
 
-		io_do_iopoll(ctx, &nr_events, 0);
+		io_do_iopoll(ctx, &nr_events, 0, false);
 
 		/* let it sleep and repeat later if can't complete a request */
 		if (nr_events == 0)
@@ -2367,7 +2378,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 			    list_empty(&ctx->iopoll_list))
 				break;
 		}
-		ret = io_do_iopoll(ctx, &nr_events, min);
+		ret = io_do_iopoll(ctx, &nr_events, min, true);
 	} while (!ret && nr_events < min && !need_resched());
 out:
 	mutex_unlock(&ctx->uring_lock);
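
Taken together, the iopoll hunks thread a resubmit flag from the callers down to the completion path: io_iopoll_try_reap_events() passes false, while io_iopoll_check() (and, further down, the SQPOLL thread) pass true, so -EAGAIN results are only re-issued from contexts where re-issuing is safe. A loose userspace model of that knob, with invented names and assuming nothing beyond what the hunks show:

    /* iopoll_resubmit_sketch.c -- invented names; a reaper that either
     * re-issues -EAGAIN completions or surfaces them as-is, depending on
     * whether the caller's context allows re-issuing. */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct creq {
        int result;
        bool done;
    };

    static void reap(struct creq *reqs, int n, bool resubmit)
    {
        for (int i = 0; i < n; i++) {
            if (reqs[i].result == -EAGAIN && resubmit) {
                /* safe context: hand the request back for another attempt */
                printf("req %d: re-issued\n", i);
                continue;
            }
            /* unsafe context (e.g. tearing the ring down): complete with the
             * result we already have, -EAGAIN included */
            reqs[i].done = true;
            printf("req %d: completed, res=%d\n", i, reqs[i].result);
        }
    }

    int main(void)
    {
        struct creq reqs[2] = { { .result = 4096 }, { .result = -EAGAIN } };

        reap(reqs, 2, false);   /* exit-time reap: never re-issues */
        return 0;
    }
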
@@ -4802,6 +4813,7 @@ IO_NETOP_FN(recv);
 struct io_poll_table {
 	struct poll_table_struct pt;
 	struct io_kiocb *req;
+	int nr_entries;
 	int error;
 };
 
@@ -4995,11 +5007,11 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
 	struct io_kiocb *req = pt->req;
 
 	/*
-	 * If poll->head is already set, it's because the file being polled
-	 * uses multiple waitqueues for poll handling (eg one for read, one
-	 * for write). Setup a separate io_poll_iocb if this happens.
+	 * The file being polled uses multiple waitqueues for poll handling
+	 * (e.g. one for read, one for write). Setup a separate io_poll_iocb
+	 * if this happens.
 	 */
-	if (unlikely(poll->head)) {
+	if (unlikely(pt->nr_entries)) {
 		struct io_poll_iocb *poll_one = poll;
 
 		/* already have a 2nd entry, fail a third attempt */
@@ -5027,7 +5039,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
 		*poll_ptr = poll;
 	}
 
-	pt->error = 0;
+	pt->nr_entries++;
 	poll->head = head;
 
 	if (poll->events & EPOLLEXCLUSIVE)
@@ -5104,11 +5116,16 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
 
 	ipt->pt._key = mask;
 	ipt->req = req;
-	ipt->error = -EINVAL;
+	ipt->error = 0;
+	ipt->nr_entries = 0;
 
 	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
+	if (unlikely(!ipt->nr_entries) && !ipt->error)
+		ipt->error = -EINVAL;
 
 	spin_lock_irq(&ctx->completion_lock);
+	if (ipt->error)
+		io_poll_remove_double(req);
 	if (likely(poll->head)) {
 		spin_lock(&poll->head->lock);
 		if (unlikely(list_empty(&poll->wait.entry))) {
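
The poll hunks replace the old inference (poll->head non-NULL means the queue callback ran, with ipt->error pre-set to -EINVAL and cleared inside the callback) with an explicit count: __io_queue_proc() bumps pt->nr_entries each time it is invoked, and __io_arm_poll_handler() reports -EINVAL only when vfs_poll() never invoked it at all, tearing down any partially installed double-poll entry on error. A small self-contained sketch of the counting idea, with invented names:

    /* poll_entries_sketch.c -- invented names; decide "this file gave us
     * nothing to wait on" from how many times the queue callback ran, not
     * from state the callback may or may not have left behind. */
    #include <errno.h>
    #include <stdio.h>

    struct poll_table {
        int nr_entries;
        int error;
    };

    /* stands in for __io_queue_proc: invoked once per waitqueue registration */
    static void queue_proc(struct poll_table *pt)
    {
        pt->nr_entries++;
    }

    /* stands in for a file's ->poll(): may register on 0, 1 or 2 waitqueues */
    static void do_poll(struct poll_table *pt, int nr_queues)
    {
        for (int i = 0; i < nr_queues; i++)
            queue_proc(pt);
    }

    /* stands in for __io_arm_poll_handler */
    static int arm_poll(int nr_queues)
    {
        struct poll_table pt = { 0 };

        do_poll(&pt, nr_queues);
        if (!pt.nr_entries && !pt.error)
            pt.error = -EINVAL;     /* nothing to wait on */
        return pt.error;
    }

    int main(void)
    {
        printf("pollable file:   %d\n", arm_poll(1));
        printf("unpollable file: %d\n", arm_poll(0));
        return 0;
    }
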
@@ -6792,7 +6809,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 
 		mutex_lock(&ctx->uring_lock);
 		if (!list_empty(&ctx->iopoll_list))
-			io_do_iopoll(ctx, &nr_events, 0);
+			io_do_iopoll(ctx, &nr_events, 0, true);
 
 		/*
 		 * Don't submit if refs are dying, good for io_uring_register(),
@@ -7899,15 +7916,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
 	struct io_wq_data data;
 	unsigned int concurrency;
 
+	mutex_lock(&ctx->uring_lock);
 	hash = ctx->hash_map;
 	if (!hash) {
 		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
-		if (!hash)
+		if (!hash) {
+			mutex_unlock(&ctx->uring_lock);
 			return ERR_PTR(-ENOMEM);
+		}
 		refcount_set(&hash->refs, 1);
 		init_waitqueue_head(&hash->wait);
 		ctx->hash_map = hash;
 	}
+	mutex_unlock(&ctx->uring_lock);
 
 	data.hash = hash;
 	data.task = task;
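
The hunk above serializes the lazy, allocate-once setup of ctx->hash_map under uring_lock, so two concurrent first-time users of the same ring cannot both observe NULL and install their own map. The shape of that pattern in plain pthreads, with invented names (compile with -pthread):

    /* lazy_init_sketch.c -- invented names; the allocate-once-under-a-mutex
     * pattern the hunk applies to ctx->hash_map. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int *shared_map;     /* stands in for ctx->hash_map */

    static int *get_map(void)
    {
        int *map;

        pthread_mutex_lock(&lock);
        map = shared_map;
        if (!map) {
            map = calloc(1, sizeof(*map));
            if (!map) {
                pthread_mutex_unlock(&lock);    /* mirrors the -ENOMEM exit */
                return NULL;
            }
            shared_map = map;
        }
        pthread_mutex_unlock(&lock);
        return map;
    }

    static void *worker(void *arg)
    {
        (void)arg;
        /* every thread sees the same pointer, allocated exactly once */
        printf("map = %p\n", (void *)get_map());
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];

        for (int i = 0; i < 2; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 2; i++)
            pthread_join(t[i], NULL);
        free(shared_map);
        return 0;
    }
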
@@ -7981,9 +8002,11 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 		f = fdget(p->wq_fd);
 		if (!f.file)
 			return -ENXIO;
-		fdput(f);
-		if (f.file->f_op != &io_uring_fops)
+		if (f.file->f_op != &io_uring_fops) {
+			fdput(f);
 			return -EINVAL;
+		}
+		fdput(f);
 	}
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		struct task_struct *tsk;
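
The last hunk fixes an ordering bug: the old code called fdput(f) and then dereferenced f.file to inspect f_op, that is, it used the file after dropping the reference that kept it alive. The general rule it restores, sketched with an invented refcounted object:

    /* put_after_use_sketch.c -- invented names; drop a reference only after
     * the last access made through it. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct object {
        int refs;
        int type;
    };

    static void obj_put(struct object *o)
    {
        if (--o->refs == 0)
            free(o);    /* any later access through o is a use-after-free */
    }

    static int check_type(struct object *o, int want)
    {
        int ok = (o->type == want); /* look while we still hold the reference */

        obj_put(o);                 /* put only once we are done with it */
        return ok ? 0 : -EINVAL;
    }

    int main(void)
    {
        struct object *o = calloc(1, sizeof(*o));

        o->refs = 1;
        o->type = 1;
        printf("%d\n", check_type(o, 1));
        return 0;
    }
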