@@ -754,29 +754,29 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 }
 
 /* can be called by any task */
-static void io_put_task_remote(struct task_struct *task, int nr)
+static void io_put_task_remote(struct task_struct *task)
 {
 	struct io_uring_task *tctx = task->io_uring;
 
-	percpu_counter_sub(&tctx->inflight, nr);
+	percpu_counter_sub(&tctx->inflight, 1);
 	if (unlikely(atomic_read(&tctx->in_cancel)))
 		wake_up(&tctx->wait);
-	put_task_struct_many(task, nr);
+	put_task_struct(task);
 }
 
 /* used by a task to put its own references */
-static void io_put_task_local(struct task_struct *task, int nr)
+static void io_put_task_local(struct task_struct *task)
 {
-	task->io_uring->cached_refs += nr;
+	task->io_uring->cached_refs++;
 }
 
 /* must be called somewhat shortly after putting a request */
-static inline void io_put_task(struct task_struct *task, int nr)
+static inline void io_put_task(struct task_struct *task)
 {
 	if (likely(task == current))
-		io_put_task_local(task, nr);
+		io_put_task_local(task);
 	else
-		io_put_task_remote(task, nr);
+		io_put_task_remote(task);
 }
 
 void io_task_refs_refill(struct io_uring_task *tctx)
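
The split above is the whole trick: io_put_task() dispatches on task identity, so the common case of a task putting its own reference is a bare increment of tctx->cached_refs, while only cross-task puts pay for the shared percpu counter and the real put_task_struct(). A minimal user-space model of that fast/slow split follows; plain fields stand in for percpu_counter and the task refcount, and all names are hypothetical (an illustrative sketch, not kernel code):

#include <stdio.h>

/* Simplified stand-ins for the task refcount and percpu inflight counter. */
struct task_model {
	long inflight;		/* shared counter: only remote puts touch it */
	int  cached_refs;	/* private to the owning task: cheap local puts */
};

static void put_local(struct task_model *t)  { t->cached_refs++; }
static void put_remote(struct task_model *t) { t->inflight--; }

/* Mirrors io_put_task(): branch on whether the caller owns the task. */
static void put_task(struct task_model *t, int caller_is_owner)
{
	if (caller_is_owner)
		put_local(t);	/* likely path: no shared-state traffic */
	else
		put_remote(t);	/* unlikely path: shared counter + real put */
}

int main(void)
{
	struct task_model t = { .inflight = 2, .cached_refs = 0 };

	put_task(&t, 1);	/* owner banks the ref in its local cache */
	put_task(&t, 0);	/* a foreign task hits the shared counter */
	printf("inflight=%ld cached_refs=%d\n", t.inflight, t.cached_refs);
	return 0;
}

With every call now putting exactly one reference, the nr argument and put_task_struct_many() have nothing left to batch, which is why the hunks below can delete the counting machinery at the call sites.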
@@ -1033,7 +1033,7 @@ static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 	 * we don't hold ->completion_lock. Clean them here to avoid
 	 * deadlocks.
 	 */
-	io_put_task_remote(req->task, 1);
+	io_put_task_remote(req->task);
 	wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
 	ctx->locked_free_nr++;
 }
@@ -1518,9 +1518,6 @@ void io_queue_next(struct io_kiocb *req)
 void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 	__must_hold(&ctx->uring_lock)
 {
-	struct task_struct *task = NULL;
-	int task_refs = 0;
-
 	do {
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    comp_list);
@@ -1550,19 +1547,10 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 
 		io_req_put_rsrc_locked(req, ctx);
 
-		if (req->task != task) {
-			if (task)
-				io_put_task(task, task_refs);
-			task = req->task;
-			task_refs = 0;
-		}
-		task_refs++;
+		io_put_task(req->task);
 		node = req->comp_list.next;
 		io_req_add_to_cache(req, ctx);
 	} while (node);
-
-	if (task)
-		io_put_task(task, task_refs);
 }
 
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
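
The io_free_batch_list() hunks show the payoff of the single-reference API: the old loop grouped consecutive same-task requests so it could pass io_put_task() an accumulated count, and needed a trailing flush after the loop; the new loop just puts one reference per request. A standalone sketch of the two shapes, using hypothetical put_owner()/put_owner_many() helpers rather than the kernel API:

#include <stdio.h>

struct owner { int refs; };
struct req   { struct owner *owner; struct req *next; };

static void put_owner(struct owner *o)             { o->refs -= 1; }
static void put_owner_many(struct owner *o, int n) { o->refs -= n; }

/* Old shape: batch consecutive same-owner requests, flush at the end. */
static void free_list_batched(struct req *r)
{
	struct owner *owner = NULL;
	int refs = 0;

	for (; r; r = r->next) {
		if (r->owner != owner) {
			if (owner)
				put_owner_many(owner, refs);
			owner = r->owner;
			refs = 0;
		}
		refs++;
	}
	if (owner)
		put_owner_many(owner, refs);	/* easy-to-miss trailing flush */
}

/* New shape: one put per request; all the batching state disappears. */
static void free_list_simple(struct req *r)
{
	for (; r; r = r->next)
		put_owner(r->owner);
}

int main(void)
{
	struct owner a = { 3 }, b = { 2 };
	struct req r3 = { &b, NULL }, r2 = { &a, &r3 }, r1 = { &a, &r2 };

	free_list_batched(&r1);		/* a.refs: 3 -> 1, b.refs: 2 -> 1 */
	a.refs = 3; b.refs = 2;
	free_list_simple(&r1);		/* same end state, less bookkeeping */
	printf("a=%d b=%d\n", a.refs, b.refs);
	return 0;
}

The simple form is presumably also no slower in the common case, since a same-task put is just the cached_refs increment shown in the first hunk.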