@@ -1108,10 +1108,16 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 	}
 }
 
-static void io_req_clean_work(struct io_kiocb *req)
+/*
+ * Returns true if we need to defer file table putting. This can only happen
+ * from the error path with REQ_F_COMP_LOCKED set.
+ */
+static bool io_req_clean_work(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_WORK_INITIALIZED))
-		return;
+		return false;
+
+	req->flags &= ~REQ_F_WORK_INITIALIZED;
 
 	if (req->work.mm) {
 		mmdrop(req->work.mm);
@@ -1124,6 +1130,9 @@ static void io_req_clean_work(struct io_kiocb *req)
 	if (req->work.fs) {
 		struct fs_struct *fs = req->work.fs;
 
+		if (req->flags & REQ_F_COMP_LOCKED)
+			return true;
+
 		spin_lock(&req->work.fs->lock);
 		if (--fs->users)
 			fs = NULL;
@@ -1132,7 +1141,8 @@ static void io_req_clean_work(struct io_kiocb *req)
 			free_fs_struct(fs);
 		req->work.fs = NULL;
 	}
-	req->flags &= ~REQ_F_WORK_INITIALIZED;
+
+	return false;
 }
 
 static void io_prep_async_work(struct io_kiocb *req)
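
Taken together, the hunks above turn io_req_clean_work() into a helper that reports whether the fs_struct put had to be skipped: REQ_F_WORK_INITIALIZED is now cleared up front, so a repeated call from a deferred path is a cheap no-op, and a request that still holds a file table while REQ_F_COMP_LOCKED is set returns true instead of touching fs->lock. A hedged sketch of the contract a caller now has to honor (the caller shape here is illustrative, not code from the patch):

	/* Illustrative caller of the new return value; not from the patch. */
	if (io_req_clean_work(req)) {
		/*
		 * The fs_struct put was skipped because the completion lock
		 * is held; arrange for it to run later from a context where
		 * taking fs->lock is safe (see __io_free_req() below).
		 */
	} else {
		/* All per-request work state has already been dropped. */
	}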
@@ -1544,15 +1554,14 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
 		fput(file);
 }
 
-static void io_dismantle_req(struct io_kiocb *req)
+static bool io_dismantle_req(struct io_kiocb *req)
 {
 	io_clean_op(req);
 
 	if (req->io)
 		kfree(req->io);
 	if (req->file)
 		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
-	io_req_clean_work(req);
 
 	if (req->flags & REQ_F_INFLIGHT) {
 		struct io_ring_ctx *ctx = req->ctx;
@@ -1564,22 +1573,55 @@ static void io_dismantle_req(struct io_kiocb *req)
 			wake_up(&ctx->inflight_wait);
 		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	}
+
+	return io_req_clean_work(req);
 }
 
-static void __io_free_req(struct io_kiocb *req)
+static void __io_free_req_finish(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx;
+	struct io_ring_ctx *ctx = req->ctx;
 
-	io_dismantle_req(req);
 	__io_put_req_task(req);
-	ctx = req->ctx;
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
 	else
 		clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
 	percpu_ref_put(&ctx->refs);
 }
 
+static void io_req_task_file_table_put(struct callback_head *cb)
+{
+	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct fs_struct *fs = req->work.fs;
+
+	spin_lock(&req->work.fs->lock);
+	if (--fs->users)
+		fs = NULL;
+	spin_unlock(&req->work.fs->lock);
+	if (fs)
+		free_fs_struct(fs);
+	req->work.fs = NULL;
+	__io_free_req_finish(req);
+}
+
+static void __io_free_req(struct io_kiocb *req)
+{
+	if (!io_dismantle_req(req)) {
+		__io_free_req_finish(req);
+	} else {
+		int ret;
+
+		init_task_work(&req->task_work, io_req_task_file_table_put);
+		ret = task_work_add(req->task, &req->task_work, TWA_RESUME);
+		if (unlikely(ret)) {
+			struct task_struct *tsk;
+
+			tsk = io_wq_get_task(req->ctx->io_wq);
+			task_work_add(tsk, &req->task_work, 0);
+		}
+	}
+}
+
 static bool io_link_cancel_timeout(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
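
The rewritten __io_free_req() above is the heart of the change: the old unconditional teardown is split into io_dismantle_req() plus __io_free_req_finish(), and when the dismantle step reports a deferred put, the final fs_struct drop is queued as task_work on the owning task, falling back to an io-wq worker if that task can no longer accept task_work. A generalized sketch of that queue-with-fallback shape (the wrapper name io_queue_deferred_cleanup is made up for illustration; the calls themselves mirror the hunk above):

	static void io_queue_deferred_cleanup(struct io_kiocb *req,
					      task_work_func_t func)
	{
		int ret;

		init_task_work(&req->task_work, func);
		/* Preferred: run the callback in the task owning the request. */
		ret = task_work_add(req->task, &req->task_work, TWA_RESUME);
		if (unlikely(ret)) {
			/*
			 * The owning task is exiting and rejects task_work;
			 * punt to an io-wq worker so the cleanup still runs
			 * in process context.
			 */
			struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);

			task_work_add(tsk, &req->task_work, 0);
		}
	}

Either way the callback (here io_req_task_file_table_put()) runs without the completion lock held, so it can safely take fs->lock and then finish freeing the request.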
@@ -1868,7 +1910,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 		req->flags &= ~REQ_F_TASK_PINNED;
 	}
 
-	io_dismantle_req(req);
+	WARN_ON_ONCE(io_dismantle_req(req));
 	rb->reqs[rb->to_free++] = req;
 	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
 		__io_req_free_batch_flush(req->ctx, rb);
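
In the batched-free path, by contrast, the deferral is asserted never to trigger: per the comment added at the top of io_req_clean_work(), a true return can only come from the error path with REQ_F_COMP_LOCKED set, which io_req_free_batch() is not expected to hit, so WARN_ON_ONCE() documents and checks that invariant without changing behavior.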