@@ -122,15 +122,6 @@ struct ublk_uring_cmd_pdu {
 	 */
 #define UBLK_IO_FLAG_OWNED_BY_SRV	0x02
 
-	/*
-	 * IO command is aborted, so this flag is set in case of
-	 * !UBLK_IO_FLAG_ACTIVE.
-	 *
-	 * After this flag is observed, any pending or new incoming request
-	 * associated with this io command will be failed immediately
-	 */
-#define UBLK_IO_FLAG_ABORTED	0x04
-
 	/*
 	 * UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires
 	 * get data buffer address from ublksrv.
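For readers outside the driver, io->flags is an ordinary bitmask, so the UBLK_IO_FLAG_ABORTED bit removed above was set and tested with plain bitwise operations. A minimal, runnable userspace sketch of that idiom (the struct is a simplified stand-in; only the flag names and values come from the header above):

#include <stdio.h>

#define UBLK_IO_FLAG_ACTIVE		0x01
#define UBLK_IO_FLAG_OWNED_BY_SRV	0x02
#define UBLK_IO_FLAG_ABORTED		0x04	/* the bit this commit removes */

struct ublk_io { unsigned int flags; };		/* simplified stand-in */

int main(void)
{
	struct ublk_io io = { .flags = 0 };

	/* abort path: the flag was only set once the io was no longer ACTIVE */
	if (!(io.flags & UBLK_IO_FLAG_ACTIVE))
		io.flags |= UBLK_IO_FLAG_ABORTED;

	/* completion path: once observed, the request was failed immediately */
	if (io.flags & UBLK_IO_FLAG_ABORTED)
		printf("fail request with BLK_STS_IOERR\n");
	return 0;
}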
@@ -1083,12 +1074,6 @@ static inline void __ublk_complete_rq(struct request *req)
 	unsigned int unmapped_bytes;
 	blk_status_t res = BLK_STS_OK;
 
-	/* called from ublk_abort_queue() code path */
-	if (io->flags & UBLK_IO_FLAG_ABORTED) {
-		res = BLK_STS_IOERR;
-		goto exit;
-	}
-
 	/* failed read IO if nothing is read */
 	if (!io->res && req_op(req) == REQ_OP_READ)
 		io->res = -EIO;
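What survives in __ublk_complete_rq() is worth spelling out: io->res carries the server's result as a byte count or a negative errno, and a READ that transferred zero bytes is treated as an error by forcing io->res to -EIO. A small standalone sketch of just that rule (the enum and helper are simplified stand-ins; only the empty-read-fails behavior comes from this hunk):

#include <stdio.h>
#include <errno.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE };	/* simplified stand-in */

/* mirrors the surviving check in __ublk_complete_rq() */
static int fixup_read_result(int res, enum req_op op)
{
	if (!res && op == REQ_OP_READ)
		return -EIO;	/* nothing was read: fail the READ */
	return res;
}

int main(void)
{
	printf("empty read  -> %d\n", fixup_read_result(0, REQ_OP_READ));	/* -EIO */
	printf("empty write -> %d\n", fixup_read_result(0, REQ_OP_WRITE));	/* 0 */
	return 0;
}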
@@ -1138,47 +1123,6 @@ static void ublk_complete_rq(struct kref *ref)
 	__ublk_complete_rq(req);
 }
 
-static void ublk_do_fail_rq(struct request *req)
-{
-	struct ublk_queue *ubq = req->mq_hctx->driver_data;
-
-	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
-		blk_mq_requeue_request(req, false);
-	else
-		__ublk_complete_rq(req);
-}
-
-static void ublk_fail_rq_fn(struct kref *ref)
-{
-	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
-			ref);
-	struct request *req = blk_mq_rq_from_pdu(data);
-
-	ublk_do_fail_rq(req);
-}
-
-/*
- * Since ublk_rq_task_work_cb always fails requests immediately during
- * exiting, __ublk_fail_req() is only called from abort context during
- * exiting. So lock is unnecessary.
- *
- * Also aborting may not be started yet, keep in mind that one failed
- * request may be issued by block layer again.
- */
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
-		struct request *req)
-{
-	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
-
-	if (ublk_need_req_ref(ubq)) {
-		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
-		kref_put(&data->ref, ublk_fail_rq_fn);
-	} else {
-		ublk_do_fail_rq(req);
-	}
-}
-
 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
 		unsigned issue_flags)
 {
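The deleted path failed a request by dropping the per-request reference: kref_put() invokes its release callback once the count reaches zero, and container_of() recovers the enclosing per-request data from the embedded kref, which is how ublk_fail_rq_fn() found its request again. A runnable userspace analog of that pattern, with toy kref helpers standing in for the kernel's (the types here are simplified, not the driver's):

#include <stdio.h>
#include <stddef.h>
#include <stdatomic.h>

/* userspace stand-ins for the kernel's container_of and kref */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { atomic_int refcount; };

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k) { atomic_fetch_add(&k->refcount, 1); }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	/* release fires exactly once, when the last reference is dropped */
	if (atomic_fetch_sub(&k->refcount, 1) == 1)
		release(k);
}

/* mirrors the shape of the removed ublk_fail_rq_fn() */
struct rq_data { struct kref ref; int tag; };

static void fail_rq_fn(struct kref *ref)
{
	struct rq_data *data = container_of(ref, struct rq_data, ref);

	printf("failing request tag %d\n", data->tag);
}

int main(void)
{
	struct rq_data data = { .tag = 7 };

	kref_init(&data.ref);
	kref_get(&data.ref);			/* e.g. an in-flight user copy */
	kref_put(&data.ref, fail_rq_fn);	/* abort path drops its ref */
	kref_put(&data.ref, fail_rq_fn);	/* last ref: callback fires */
	return 0;
}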
@@ -1667,10 +1611,26 @@ static void ublk_commit_completion(struct ublk_device *ub,
 	ublk_put_req_ref(ubq, req);
 }
 
+static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+		struct request *req)
+{
+	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+
+	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+		blk_mq_requeue_request(req, false);
+	else {
+		io->res = -EIO;
+		__ublk_complete_rq(req);
+	}
+}
+
 /*
- * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
- * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
- * context, so everything is serialized.
+ * Called from ublk char device release handler, when any uring_cmd is
+ * done, meantime request queue is "quiesced" since all inflight requests
+ * can't be completed because ublk server is dead.
+ *
+ * So no one can hold our request IO reference any more, simply ignore the
+ * reference, and complete the request immediately
  */
 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
 {
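The relocated __ublk_fail_req() makes the policy decision inline: when the device is configured to reissue outstanding requests after the server dies (the case probed by ublk_nosrv_should_reissue_outstanding()), the request is requeued for a future server; otherwise it is completed with -EIO. A hedged standalone sketch of that two-way decision (the outcome names and the bool parameter are illustrative, not from the driver):

#include <stdio.h>
#include <stdbool.h>

enum rq_outcome { RQ_REQUEUED, RQ_FAILED_EIO };

/* mirrors the shape of the new __ublk_fail_req() */
static enum rq_outcome fail_req(bool should_reissue_outstanding)
{
	if (should_reissue_outstanding)
		return RQ_REQUEUED;	/* blk_mq_requeue_request(req, false) */
	return RQ_FAILED_EIO;		/* io->res = -EIO; __ublk_complete_rq(req) */
}

int main(void)
{
	printf("reissue device: %s\n",
	       fail_req(true) == RQ_REQUEUED ? "requeued" : "failed");
	printf("plain device:   %s\n",
	       fail_req(false) == RQ_FAILED_EIO ? "failed with -EIO" : "requeued");
	return 0;
}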
@@ -1687,10 +1647,8 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
 			 * will do it
 			 */
 			rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
-			if (rq && blk_mq_request_started(rq)) {
-				io->flags |= UBLK_IO_FLAG_ABORTED;
+			if (rq && blk_mq_request_started(rq))
 				__ublk_fail_req(ubq, io, rq);
-			}
 		}
 	}
 }