@@ -605,6 +605,7 @@ enum {
 
 struct async_poll {
 	struct io_poll_iocb	poll;
+	struct io_poll_iocb	*double_poll;
 	struct io_wq_work	work;
 };
 
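The fix starts with the data structure: struct async_poll grows a second, separately allocated poll entry. A polled file can have more than one waitqueue (typically one for read and one for write), and the second io_poll_iocb used to be stashed in req->io — a field the poll-driven retry path cannot borrow safely, because the request type itself may keep its own async data there. What follows is a minimal userspace sketch of the ownership rule this field establishes; every name in it (poll_entry, async_poll_sketch, remove_double) is illustrative only, not kernel API, and allocation-failure handling is elided.

    /* build: cc -o double_poll_sketch double_poll_sketch.c && ./double_poll_sketch */
    #include <stdio.h>
    #include <stdlib.h>

    struct poll_entry {                     /* stand-in for struct io_poll_iocb */
            unsigned events;
    };

    struct async_poll_sketch {              /* mirrors the patched struct async_poll */
            struct poll_entry poll;         /* first entry, embedded as before */
            struct poll_entry *double_poll; /* second entry, owned by this struct */
    };

    /*
     * Mirrors the reworked io_poll_remove_double(): the caller names the
     * entry to drop instead of the helper digging it out of req->io.
     */
    static void remove_double(struct poll_entry *entry)
    {
            if (entry)
                    printf("dropping second entry (events 0x%x)\n", entry->events);
    }

    int main(void)
    {
            /* double_poll starts out NULL (calloc zeroes it), matching the
             * explicit initialization added to io_arm_poll_handler() below */
            struct async_poll_sketch *apoll = calloc(1, sizeof(*apoll));

            /* the file exposed a second waitqueue: arm entry #2 */
            apoll->double_poll = calloc(1, sizeof(*apoll->double_poll));
            apoll->double_poll->events = 0x1;

            remove_double(apoll->double_poll);
            free(apoll->double_poll);       /* freed together with its container */
            free(apoll);
            return 0;
    }

The point of the sketch is the invariant: the second entry is reachable only through the container that armed it, so no tear-down path ever has to guess what req->io currently holds.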
@@ -4159,9 +4160,9 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
 	return false;
 }
 
-static void io_poll_remove_double(struct io_kiocb *req)
+static void io_poll_remove_double(struct io_kiocb *req, void *data)
 {
-	struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
+	struct io_poll_iocb *poll = data;
 
 	lockdep_assert_held(&req->ctx->completion_lock);
 
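io_poll_remove_double() no longer digs the second entry out of req->io; the caller names it. Taking void *data keeps both call sites uniform — pure poll requests pass req->io, internally armed ones pass apoll->double_poll — and a NULL entry simply falls through the function's existing NULL-and-head check.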
@@ -4181,7 +4182,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
-	io_poll_remove_double(req);
+	io_poll_remove_double(req, req->io);
 	req->poll.done = true;
 	io_cqring_fill_event(req, error ? error : mangle_poll(mask));
 	io_commit_cqring(ctx);
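Completion of a user-visible poll request is behaviorally unchanged: IORING_OP_POLL_ADD still keeps its second entry in req->io, so that is what io_poll_complete() hands over.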
@@ -4224,21 +4225,21 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
 			       int sync, void *key)
 {
 	struct io_kiocb *req = wait->private;
-	struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
+	struct io_poll_iocb *poll = req->apoll->double_poll;
 	__poll_t mask = key_to_poll(key);
 
 	/* for instances that support it check for an event match first: */
 	if (mask && !(mask & poll->events))
 		return 0;
 
-	if (req->poll.head) {
+	if (poll && poll->head) {
 		bool done;
 
-		spin_lock(&req->poll.head->lock);
-		done = list_empty(&req->poll.wait.entry);
+		spin_lock(&poll->head->lock);
+		done = list_empty(&poll->wait.entry);
 		if (!done)
-			list_del_init(&req->poll.wait.entry);
-		spin_unlock(&req->poll.head->lock);
+			list_del_init(&poll->wait.entry);
+		spin_unlock(&poll->head->lock);
 		if (!done)
 			__io_async_wake(req, poll, mask, io_poll_task_func);
 	}
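io_poll_double_wake() is the wakeup handler armed on the second waitqueue. It used to cast req->io and then, confusingly, lock and unlink req->poll's entry; it now operates on the entry it actually owns, fetched from apoll->double_poll, and the poll && poll->head test turns a wakeup with no second entry armed into a no-op.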
@@ -4258,7 +4259,8 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
 }
 
 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
-			    struct wait_queue_head *head)
+			    struct wait_queue_head *head,
+			    struct io_poll_iocb **poll_ptr)
 {
 	struct io_kiocb *req = pt->req;
 
@@ -4269,7 +4271,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
 	 */
 	if (unlikely(poll->head)) {
 		/* already have a 2nd entry, fail a third attempt */
-		if (req->io) {
+		if (*poll_ptr) {
 			pt->error = -EINVAL;
 			return;
 		}
@@ -4281,7 +4283,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
 		io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
 		refcount_inc(&req->refs);
 		poll->wait.private = req;
-		req->io = (void *) poll;
+		*poll_ptr = poll;
 	}
 
 	pt->error = 0;
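These three hunks rework __io_queue_proc(), the common helper that arms entries as the file's ->poll() hands over waitqueue heads. The slot for the second entry becomes a caller-supplied poll_ptr rather than the hardcoded req->io: a third waitqueue is still rejected with -EINVAL when the slot is already occupied, and a freshly allocated second entry is parked through the same pointer.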
@@ -4293,8 +4295,9 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 			       struct poll_table_struct *p)
 {
 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
+	struct async_poll *apoll = pt->req->apoll;
 
-	__io_queue_proc(&pt->req->apoll->poll, pt, head);
+	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
 
 static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
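The internal (async poll) queue proc is the first caller converted, pointing the helper at &apoll->double_poll.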
@@ -4344,11 +4347,13 @@ static void io_async_task_func(struct callback_head *cb)
 		}
 	}
 
+	io_poll_remove_double(req, apoll->double_poll);
 	spin_unlock_irq(&ctx->completion_lock);
 
 	/* restore ->work in case we need to retry again */
 	if (req->flags & REQ_F_WORK_INITIALIZED)
 		memcpy(&req->work, &apoll->work, sizeof(req->work));
+	kfree(apoll->double_poll);
 	kfree(apoll);
 
 	if (!canceled) {
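io_async_task_func() finishes an internally armed poll from task_work. It now detaches the second entry while still holding completion_lock and frees it next to the apoll container, balancing the GFP_ATOMIC allocation __io_queue_proc() makes when a second waitqueue appears.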
@@ -4436,7 +4441,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	struct async_poll *apoll;
 	struct io_poll_table ipt;
 	__poll_t mask, ret;
-	bool had_io;
 
 	if (!req->file || !file_can_poll(req->file))
 		return false;
@@ -4448,11 +4452,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 	if (unlikely(!apoll))
 		return false;
+	apoll->double_poll = NULL;
 
 	req->flags |= REQ_F_POLLED;
 	if (req->flags & REQ_F_WORK_INITIALIZED)
 		memcpy(&apoll->work, &req->work, sizeof(req->work));
-	had_io = req->io != NULL;
 
 	io_get_req_task(req);
 	req->apoll = apoll;
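Arming starts from a clean slate: double_poll is NULL until a second waitqueue actually materializes, so every tear-down path may free it unconditionally. That also retires had_io, which existed only to tell the old cleanup whether req->io was its to clear — a question the dedicated field makes moot.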
@@ -4470,13 +4474,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
 					io_async_wake);
 	if (ret) {
-		ipt.error = 0;
-		/* only remove double add if we did it here */
-		if (!had_io)
-			io_poll_remove_double(req);
+		io_poll_remove_double(req, apoll->double_poll);
 		spin_unlock_irq(&ctx->completion_lock);
 		if (req->flags & REQ_F_WORK_INITIALIZED)
 			memcpy(&req->work, &apoll->work, sizeof(req->work));
+		kfree(apoll->double_poll);
 		kfree(apoll);
 		return false;
 	}
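The arming-failure path shrinks for the same reason: unconditionally detach and free whatever second entry is attached. Since kfree(NULL) is a no-op, no guard is needed when no second waitqueue ever showed up.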
@@ -4507,11 +4509,13 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 	bool do_complete;
 
 	if (req->opcode == IORING_OP_POLL_ADD) {
-		io_poll_remove_double(req);
+		io_poll_remove_double(req, req->io);
 		do_complete = __io_poll_remove_one(req, &req->poll);
 	} else {
 		struct async_poll *apoll = req->apoll;
 
+		io_poll_remove_double(req, apoll->double_poll);
+
 		/* non-poll requests have submit ref still */
 		do_complete = __io_poll_remove_one(req, &apoll->poll);
 		if (do_complete) {
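Cancellation must handle both flavors: a pure IORING_OP_POLL_ADD still carries its second entry in req->io, while an internally armed request carries it in apoll->double_poll.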
@@ -4524,6 +4528,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 			if (req->flags & REQ_F_WORK_INITIALIZED)
 				memcpy(&req->work, &apoll->work,
 				       sizeof(req->work));
+			kfree(apoll->double_poll);
 			kfree(apoll);
 		}
 	}
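As on every other exit path, the second entry is freed together with the container that owns it.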
@@ -4624,7 +4629,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 {
 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
 
-	__io_queue_proc(&pt->req->poll, pt, head);
+	__io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io);
 }
 
 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
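Finally, the queue proc for user-visible poll requests keeps req->io as its slot, passed through the new parameter. The cast is needed because req->io has a different pointer type; the aliasing that used to be implicit throughout the file is now explicit and confined to this one call site.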