 		| UBLK_F_UNPRIVILEGED_DEV \
 		| UBLK_F_CMD_IOCTL_ENCODE \
 		| UBLK_F_USER_COPY \
-		| UBLK_F_ZONED)
+		| UBLK_F_ZONED \
+		| UBLK_F_USER_RECOVERY_FAIL_IO)
 
 #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
-		| UBLK_F_USER_RECOVERY_REISSUE)
+		| UBLK_F_USER_RECOVERY_REISSUE \
+		| UBLK_F_USER_RECOVERY_FAIL_IO)
 
 /* All UBLK_PARAM_TYPE_* should be included here */
 #define UBLK_PARAM_TYPE_ALL \
@@ -146,6 +148,7 @@ struct ublk_queue {
 	bool force_abort;
 	bool timeout;
 	bool canceling;
+	bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
 	unsigned short nr_io_ready;	/* how many ios setup */
 	spinlock_t		cancel_lock;
 	struct ublk_device *dev;
@@ -690,7 +693,8 @@ static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
  */
 static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
 {
-	return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
+	return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+		!(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
 }
 
 /*
@@ -700,7 +704,8 @@ static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
  */
 static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
 {
-	return ubq->flags & UBLK_F_USER_RECOVERY;
+	return (ubq->flags & UBLK_F_USER_RECOVERY) &&
+		!(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
 }
 
 /*
@@ -714,6 +719,12 @@ static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
 	return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
 }
 
+static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
+{
+	return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
+	       ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
+}
+
 static void ublk_free_disk(struct gendisk *disk)
 {
 	struct ublk_device *ub = disk->private_data;
@@ -1275,6 +1286,10 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct request *rq = bd->rq;
 	blk_status_t res;
 
+	if (unlikely(ubq->fail_io)) {
+		return BLK_STS_TARGET;
+	}
+
 	/* fill iod to slot in io cmd buffer */
 	res = ublk_setup_iod(ubq, rq);
 	if (unlikely(res != BLK_STS_OK))
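For context on what the BLK_STS_TARGET above means to users of the disk: while the device sits in UBLK_S_DEV_FAIL_IO, requests are completed with an error right away instead of being queued until a server daemon comes back. A minimal userspace sketch of that behavior, assuming the ublk device is exposed as /dev/ublkb0 and its server has died; the exact errno seen by the caller depends on the I/O path, the point is only that the read fails immediately rather than blocking:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[512];
		int fd = open("/dev/ublkb0", O_RDONLY);

		if (fd < 0)
			return 1;
		/* With UBLK_F_USER_RECOVERY_FAIL_IO set and no server attached,
		 * this returns -1 with an I/O error instead of hanging. */
		if (pread(fd, buf, sizeof(buf), 0) < 0)
			perror("pread");
		close(fd);
		return 0;
	}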
@@ -1625,6 +1640,7 @@ static void ublk_nosrv_work(struct work_struct *work)
 {
 	struct ublk_device *ub =
 		container_of(work, struct ublk_device, nosrv_work);
+	int i;
 
 	if (ublk_nosrv_should_stop_dev(ub)) {
 		ublk_stop_dev(ub);
@@ -1634,7 +1650,18 @@ static void ublk_nosrv_work(struct work_struct *work)
 	mutex_lock(&ub->mutex);
 	if (ub->dev_info.state != UBLK_S_DEV_LIVE)
 		goto unlock;
-	__ublk_quiesce_dev(ub);
+
+	if (ublk_nosrv_dev_should_queue_io(ub)) {
+		__ublk_quiesce_dev(ub);
+	} else {
+		blk_mq_quiesce_queue(ub->ub_disk->queue);
+		ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
+		for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+			ublk_get_queue(ub, i)->fail_io = true;
+		}
+		blk_mq_unquiesce_queue(ub->ub_disk->queue);
+	}
+
 unlock:
 	mutex_unlock(&ub->mutex);
 	ublk_cancel_dev(ub);
@@ -2387,8 +2414,13 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
 		return -EPERM;
 
 	/* forbid nonsense combinations of recovery flags */
-	if ((info.flags & UBLK_F_USER_RECOVERY_REISSUE) &&
-			!(info.flags & UBLK_F_USER_RECOVERY)) {
+	switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
+	case 0:
+	case UBLK_F_USER_RECOVERY:
+	case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
+	case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
+		break;
+	default:
 		pr_warn("%s: invalid recovery flags %llx\n", __func__,
 				info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
 		return -EINVAL;
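The switch above spells out the only recovery-flag combinations a server may request at device creation. A minimal sketch of the server side, assuming the usual struct ublksrv_ctrl_dev_info payload used with the ADD_DEV control command (field values are illustrative only):

	#include <linux/ublk_cmd.h>

	struct ublksrv_ctrl_dev_info info = { 0 };

	info.nr_hw_queues = 1;
	info.queue_depth = 128;
	/* While no server daemon is attached, fail I/O immediately instead of
	 * queueing it; this is the new UBLK_F_USER_RECOVERY_FAIL_IO behavior. */
	info.flags = UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO;

	/* Rejected with -EINVAL by the switch above:
	 *   UBLK_F_USER_RECOVERY_FAIL_IO without UBLK_F_USER_RECOVERY, and
	 *   UBLK_F_USER_RECOVERY_REISSUE combined with UBLK_F_USER_RECOVERY_FAIL_IO. */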
@@ -2729,14 +2761,18 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 	 * and related io_uring ctx is freed so file struct of /dev/ublkcX is
 	 * released.
 	 *
+	 * and one of the following holds
+	 *
 	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
 	 *     (a)has quiesced request queue
 	 *     (b)has requeued every inflight rqs whose io_flags is ACTIVE
 	 *     (c)has requeued/aborted every inflight rqs whose io_flags is NOT ACTIVE
 	 *     (d)has completed/camceled all ioucmds owned by ther dying process
+	 *
+	 * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
+	 *     quiesced, but all I/O is being immediately errored
 	 */
-	if (test_bit(UB_STATE_OPEN, &ub->state) ||
-			ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+	if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) {
 		ret = -EBUSY;
 		goto out_unlock;
 	}
@@ -2760,6 +2796,7 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	int ublksrv_pid = (int)header->data[0];
 	int ret = -EINVAL;
+	int i;
 
 	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -2774,18 +2811,29 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 	if (ublk_nosrv_should_stop_dev(ub))
 		goto out_unlock;
 
-	if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+	if (!ublk_dev_in_recoverable_state(ub)) {
 		ret = -EBUSY;
 		goto out_unlock;
 	}
 	ub->dev_info.ublksrv_pid = ublksrv_pid;
 	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
 			__func__, ublksrv_pid, header->dev_id);
-	blk_mq_unquiesce_queue(ub->ub_disk->queue);
-	pr_devel("%s: queue unquiesced, dev id %d.\n",
-			__func__, header->dev_id);
-	blk_mq_kick_requeue_list(ub->ub_disk->queue);
-	ub->dev_info.state = UBLK_S_DEV_LIVE;
+
+	if (ublk_nosrv_dev_should_queue_io(ub)) {
+		ub->dev_info.state = UBLK_S_DEV_LIVE;
+		blk_mq_unquiesce_queue(ub->ub_disk->queue);
+		pr_devel("%s: queue unquiesced, dev id %d.\n",
+				__func__, header->dev_id);
+		blk_mq_kick_requeue_list(ub->ub_disk->queue);
+	} else {
+		blk_mq_quiesce_queue(ub->ub_disk->queue);
+		ub->dev_info.state = UBLK_S_DEV_LIVE;
+		for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+			ublk_get_queue(ub, i)->fail_io = false;
+		}
+		blk_mq_unquiesce_queue(ub->ub_disk->queue);
+	}
+
 	ret = 0;
 out_unlock:
 	mutex_unlock(&ub->mutex);
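The handler above reads the replacement daemon's pid out of data[0] of the control command before flipping the device back to UBLK_S_DEV_LIVE and, in the FAIL_IO case, clearing fail_io on every queue. A minimal sketch of how the new server might build that payload, assuming it is then submitted as the usual UBLK_CMD_END_USER_RECOVERY command on /dev/ublk-control (io_uring submission plumbing omitted):

	#include <string.h>
	#include <unistd.h>
	#include <linux/ublk_cmd.h>

	/* Fill the END_USER_RECOVERY payload; ublk_ctrl_end_recovery() picks
	 * the new server pid out of data[0]. */
	static void fill_end_recovery_cmd(struct ublksrv_ctrl_cmd *cmd, __u32 dev_id)
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->dev_id = dev_id;
		cmd->data[0] = getpid();
	}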