@@ -57,11 +57,14 @@
 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
 
 struct ublk_rq_data {
-	struct callback_head work;
+	union {
+		struct callback_head work;
+		struct llist_node node;
+	};
 };
 
 struct ublk_uring_cmd_pdu {
-	struct request *req;
+	struct ublk_queue *ubq;
 };
 
 /*
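The union above is safe because a given request only ever travels one notification path: the task_work path uses work, while the io_uring fallback path changed below uses node. Both paths lean on blk-mq's pdu layout, where the driver's per-request data sits directly behind struct request. A minimal sketch of that round-trip, using struct ublk_rq_data from the hunk above (the function name is hypothetical):

#include <linux/blk-mq.h>

/* Hypothetical helper: show that rq -> pdu -> rq is lossless. */
static void example_roundtrip(struct request *rq)
{
        struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);

        /* Inverse mapping, as used by ublk_rq_task_work_cb() below. */
        WARN_ON(blk_mq_rq_from_pdu(data) != rq);
}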
@@ -119,6 +122,8 @@ struct ublk_queue {
 	struct task_struct	*ubq_daemon;
 	char *io_cmd_buf;
 
+	struct llist_head	io_cmds;
+
 	unsigned long io_addr;	/* mapped vm address */
 	unsigned int max_io_sz;
 	bool force_abort;
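The new io_cmds field is a lock-free list from <linux/llist.h>. Two of its properties carry the whole patch: llist_add() returns true only when the list was empty beforehand, and llist_del_all() atomically detaches every queued node for the consumer. A minimal sketch of that producer/consumer shape under those semantics, with arm_callback() and handle() as hypothetical stand-ins for the ublk-specific parts:

#include <linux/llist.h>

struct item {
        struct llist_node node;
};

static LLIST_HEAD(pending);

static void arm_callback(void);         /* hypothetical: schedule one drain */
static void handle(struct item *it);    /* hypothetical: per-item work */

static void producer(struct item *it)
{
        /* True only for the node that makes the list non-empty, so
         * exactly one producer per batch arms the consumer. */
        if (llist_add(&it->node, &pending))
                arm_callback();
}

static void consumer(void)
{
        /* Detach the whole batch atomically; nodes come back in
         * reverse insertion order (LIFO). */
        struct llist_node *batch = llist_del_all(&pending);
        struct item *it;

        llist_for_each_entry(it, batch, node)
                handle(it);
}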
@@ -764,8 +769,12 @@ static inline void __ublk_rq_task_work(struct request *req)
 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
 {
 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+	struct ublk_queue *ubq = pdu->ubq;
+	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+	struct ublk_rq_data *data;
 
-	__ublk_rq_task_work(pdu->req);
+	llist_for_each_entry(data, io_cmds, node)
+		__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
 }
 
 static void ublk_rq_task_work_fn(struct callback_head *work)
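ublk_rq_task_work_cb() now recovers the whole queue from the command pdu instead of a single request, then drains every pending request in one task-work invocation. Any node added after llist_del_all() empties the list sees llist_add() return true again and arms a fresh callback, so no request can be stranded. The pdu accessor is not part of this diff; as a sketch (an assumption about the rest of ublk_drv.c, not shown here), it is a cast over the pdu bytes embedded in struct io_uring_cmd:

#include <linux/io_uring.h>

/* Sketch, assuming struct io_uring_cmd reserves enough inline pdu
 * space for one pointer; a size check is presumably enforced elsewhere. */
static inline struct ublk_uring_cmd_pdu *
ublk_get_uring_cmd_pdu(struct io_uring_cmd *ioucmd)
{
        return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}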
@@ -777,17 +786,50 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
 	__ublk_rq_task_work(req);
 }
 
+static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
+{
+	struct ublk_io *io = &ubq->ios[rq->tag];
+
+	/*
+	 * If the check passes, we know that this is a re-issued request aborted
+	 * previously in monitor_work because the ubq_daemon (cmd's task) is
+	 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+	 * because this ioucmd's io_uring context may be freed now if no inflight
+	 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
+	 *
+	 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
+	 * (releasing the tag). The request is then re-started (allocating the tag)
+	 * and we land here. Since releasing/allocating a tag implies smp_mb(),
+	 * finding UBLK_IO_FLAG_ABORTED guarantees this is a re-issued, aborted request.
+	 */
+	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
+		struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+		struct ublk_rq_data *data;
+
+		llist_for_each_entry(data, io_cmds, node)
+			__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+	} else {
+		struct io_uring_cmd *cmd = io->cmd;
+		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+		pdu->ubq = ubq;
+		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+	}
+}
+
 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct ublk_queue *ubq = hctx->driver_data;
 	struct request *rq = bd->rq;
+	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
 	blk_status_t res;
 
 	/* fill iod to slot in io cmd buffer */
 	res = ublk_setup_iod(ubq, rq);
 	if (unlikely(res != BLK_STS_OK))
 		return BLK_STS_IOERR;
+
 	/* With recovery feature enabled, force_abort is set in
 	 * ublk_stop_dev() before calling del_gendisk(). We have to
 	 * abort all requeued and new rqs here to let del_gendisk()
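Note that ublk_submit_cmd() runs only for the batch leader (the llist_add() call below that returns true), yet both of its branches act on the entire pending list: the aborted path drains and aborts every queued request via __ublk_abort_rq(), and the normal path stashes ubq in the pdu and arms a single callback that drains them all in ublk_rq_task_work_cb().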
@@ -809,36 +851,15 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
809
851
}
810
852
811
853
if (ublk_can_use_task_work (ubq )) {
812
- struct ublk_rq_data * data = blk_mq_rq_to_pdu (rq );
813
854
enum task_work_notify_mode notify_mode = bd -> last ?
814
855
TWA_SIGNAL_NO_IPI : TWA_NONE ;
815
856
816
857
if (task_work_add (ubq -> ubq_daemon , & data -> work , notify_mode ))
817
858
goto fail ;
818
859
} else {
819
- struct ublk_io * io = & ubq -> ios [rq -> tag ];
820
- struct io_uring_cmd * cmd = io -> cmd ;
821
- struct ublk_uring_cmd_pdu * pdu = ublk_get_uring_cmd_pdu (cmd );
822
-
823
- /*
824
- * If the check pass, we know that this is a re-issued request aborted
825
- * previously in monitor_work because the ubq_daemon(cmd's task) is
826
- * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
827
- * because this ioucmd's io_uring context may be freed now if no inflight
828
- * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
829
- *
830
- * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
831
- * the tag). Then the request is re-started(allocating the tag) and we are here.
832
- * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
833
- * guarantees that here is a re-issued request aborted previously.
834
- */
835
- if ((io -> flags & UBLK_IO_FLAG_ABORTED ))
836
- goto fail ;
837
-
838
- pdu -> req = rq ;
839
- io_uring_cmd_complete_in_task (cmd , ublk_rq_task_work_cb );
860
+ if (llist_add (& data -> node , & ubq -> io_cmds ))
861
+ ublk_submit_cmd (ubq , rq );
840
862
}
841
-
842
863
return BLK_STS_OK ;
843
864
}
844
865
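With this change the non-task_work branch of ublk_queue_rq() shrinks to two lines: every request parks itself on ubq->io_cmds, and only the empty-to-non-empty transition arms the daemon-side callback; requests queued while a callback is pending simply ride along with that batch. The aborted-request handling moved into ublk_submit_cmd() above, which now aborts the whole pending batch via __ublk_abort_rq() rather than failing a single request with goto fail.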
@@ -1168,17 +1189,19 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
 {
 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
 	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
+	struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
 
 	if (ublk_can_use_task_work(ubq)) {
-		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
 		/* should not fail since we call it just in ubq->ubq_daemon */
 		task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI);
 	} else {
 		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
 
-		pdu->req = req;
-		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+		if (llist_add(&data->node, &ubq->io_cmds)) {
+			pdu->ubq = ubq;
+			io_uring_cmd_complete_in_task(cmd,
+					ublk_rq_task_work_cb);
+		}
 	}
 }
 
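UBLK_IO_NEED_GET_DATA handling reuses the same list: if llist_add() finds a drain already pending, this request is simply picked up by that callback; otherwise this cmd's own task work is armed, mirroring the submission path in ublk_queue_rq().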