@@ -57,11 +57,14 @@
 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
 
 struct ublk_rq_data {
-	struct callback_head work;
+	union {
+		struct callback_head work;
+		struct llist_node node;
+	};
 };
 
 struct ublk_uring_cmd_pdu {
-	struct request *req;
+	struct ublk_queue *ubq;
 };
 
 /*
@@ -119,6 +122,8 @@ struct ublk_queue {
 	struct task_struct	*ubq_daemon;
 	char *io_cmd_buf;
 
+	struct llist_head	io_cmds;
+
 	unsigned long io_addr;	/* mapped vm address */
 	unsigned int max_io_sz;
 	bool force_abort;
@@ -764,8 +769,12 @@ static inline void __ublk_rq_task_work(struct request *req)
 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
 {
 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+	struct ublk_queue *ubq = pdu->ubq;
+	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+	struct ublk_rq_data *data;
 
-	__ublk_rq_task_work(pdu->req);
+	llist_for_each_entry(data, io_cmds, node)
+		__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
 }
 
 static void ublk_rq_task_work_fn(struct callback_head *work)
@@ -777,6 +786,54 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
 	__ublk_rq_task_work(req);
 }
 
+static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
+{
+	struct ublk_io *io = &ubq->ios[rq->tag];
+
+	/*
+	 * If the check pass, we know that this is a re-issued request aborted
+	 * previously in monitor_work because the ubq_daemon(cmd's task) is
+	 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+	 * because this ioucmd's io_uring context may be freed now if no inflight
+	 * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
+	 *
+	 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
+	 * the tag). Then the request is re-started(allocating the tag) and we are here.
+	 * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
+	 * guarantees that here is a re-issued request aborted previously.
+	 */
+	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
+		struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+		struct ublk_rq_data *data;
+
+		llist_for_each_entry(data, io_cmds, node)
+			__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+	} else {
+		struct io_uring_cmd *cmd = io->cmd;
+		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+		pdu->ubq = ubq;
+		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+	}
+}
+
+static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq,
+		bool last)
+{
+	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+
+	if (ublk_can_use_task_work(ubq)) {
+		enum task_work_notify_mode notify_mode = last ?
+			TWA_SIGNAL_NO_IPI : TWA_NONE;
+
+		if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
+			__ublk_abort_rq(ubq, rq);
+	} else {
+		if (llist_add(&data->node, &ubq->io_cmds))
+			ublk_submit_cmd(ubq, rq);
+	}
+}
+
 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
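For illustration only, not part of the patch: the new path in ublk_queue_cmd() and ublk_rq_task_work_cb() is the usual lockless producer/consumer idiom behind the kernel's llist API. llist_add() reports whether the list was previously empty, so only the submitter that makes it non-empty needs to schedule a drain, and the drain side detaches the whole batch in one step with llist_del_all(). Below is a minimal userspace C sketch of that idiom; the names (push, drain, io_cmds, tag) are illustrative and not taken from the driver.

/*
 * Userspace sketch of the llist pattern used above (illustrative, not
 * driver code).  push() mirrors llist_add(): a lock-free push to the head
 * that returns true only when the list was empty, so only that caller
 * needs to schedule a drain.  drain() mirrors llist_del_all(): it detaches
 * the whole batch atomically; in this sketch nodes come back in reverse
 * (most-recently-pushed-first) order.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int tag;			/* stand-in for a request tag */
};

struct list_head {
	_Atomic(struct node *) first;
};

static bool push(struct list_head *head, struct node *n)
{
	struct node *first = atomic_load(&head->first);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&head->first, &first, n));

	return first == NULL;	/* true: list was empty, schedule a drain */
}

static struct node *drain(struct list_head *head)
{
	/* Detach every queued node in one atomic step. */
	return atomic_exchange(&head->first, NULL);
}

int main(void)
{
	struct list_head io_cmds = { .first = NULL };
	struct node a = { .tag = 0 }, b = { .tag = 1 }, c = { .tag = 2 };

	/* Only the first push reports an empty list. */
	printf("schedule drain? %d %d %d\n",
	       push(&io_cmds, &a), push(&io_cmds, &b), push(&io_cmds, &c));

	/* A single drain then walks the whole batch. */
	for (struct node *n = drain(&io_cmds); n; n = n->next)
		printf("handle tag %d\n", n->tag);

	return 0;
}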
@@ -788,6 +845,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 	res = ublk_setup_iod(ubq, rq);
 	if (unlikely(res != BLK_STS_OK))
 		return BLK_STS_IOERR;
+
 	/* With recovery feature enabled, force_abort is set in
 	 * ublk_stop_dev() before calling del_gendisk(). We have to
 	 * abort all requeued and new rqs here to let del_gendisk()
@@ -803,41 +861,11 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(bd->rq);
 
 	if (unlikely(ubq_daemon_is_dying(ubq))) {
- fail:
 		__ublk_abort_rq(ubq, rq);
 		return BLK_STS_OK;
 	}
 
-	if (ublk_can_use_task_work(ubq)) {
-		struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-		enum task_work_notify_mode notify_mode = bd->last ?
-			TWA_SIGNAL_NO_IPI : TWA_NONE;
-
-		if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
-			goto fail;
-	} else {
-		struct ublk_io *io = &ubq->ios[rq->tag];
-		struct io_uring_cmd *cmd = io->cmd;
-		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
-
-		/*
-		 * If the check pass, we know that this is a re-issued request aborted
-		 * previously in monitor_work because the ubq_daemon(cmd's task) is
-		 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
-		 * because this ioucmd's io_uring context may be freed now if no inflight
-		 * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
-		 *
-		 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
-		 * the tag). Then the request is re-started(allocating the tag) and we are here.
-		 * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
-		 * guarantees that here is a re-issued request aborted previously.
-		 */
-		if ((io->flags & UBLK_IO_FLAG_ABORTED))
-			goto fail;
-
-		pdu->req = rq;
-		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
-	}
+	ublk_queue_cmd(ubq, rq, bd->last);
 
 	return BLK_STS_OK;
 }
@@ -1164,22 +1192,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 }
 
 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
-		int tag, struct io_uring_cmd *cmd)
+		int tag)
 {
 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
 	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
 
-	if (ublk_can_use_task_work(ubq)) {
-		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
-		/* should not fail since we call it just in ubq->ubq_daemon */
-		task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI);
-	} else {
-		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
-
-		pdu->req = req;
-		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
-	}
+	ublk_queue_cmd(ubq, req, true);
 }
 
 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -1267,7 +1285,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 		io->addr = ub_cmd->addr;
 		io->cmd = cmd;
 		io->flags |= UBLK_IO_FLAG_ACTIVE;
-		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag, cmd);
+		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
 		break;
 	default:
 		goto out;
@@ -1658,6 +1676,9 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
 	 */
 	ub->dev_info.flags &= UBLK_F_ALL;
 
+	if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
+		ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
+
 	/* We are not ready to support zero copy */
 	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
 
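For context, again illustrative rather than part of the patch: the hunk above forces UBLK_F_URING_CMD_COMP_IN_TASK whenever the driver is built as a module, since task_work_add() is not exported to modules, so only a built-in driver can take the task_work path selected in ublk_queue_cmd(). Under that assumption, the ublk_can_use_task_work() check presumably reduces to something like the sketch below (not copied from the tree).

/*
 * Illustrative sketch, not the tree's exact code: the task_work_add() path
 * is presumably available only when the driver is built in and userspace
 * did not request UBLK_F_URING_CMD_COMP_IN_TASK; module builds always fall
 * back to io_uring_cmd_complete_in_task() via the flag forced above.
 */
static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
{
	return IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
		!(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK);
}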