Skip to content

Commit 212c928

Browse files
ps-ushankar authored and axboe committed
ublk: don't quiesce in ublk_ch_release
ublk_ch_release currently quiesces the device's request_queue while setting force_abort/fail_io. This avoids data races by preventing concurrent reads from the I/O path, but is not strictly needed - at this point, canceling is already set and guaranteed to be observed by any concurrently executing I/Os, so they will be handled properly even if the changes to force_abort/fail_io propagate to the I/O path later. Remove the quiesce/unquiesce calls from ublk_ch_release. This makes the writes to force_abort/fail_io concurrent with the reads in the I/O path, so make the accesses atomic. Before this change, the call to blk_mq_quiesce_queue was responsible for most (90%) of the runtime of ublk_ch_release. With that call eliminated, ublk_ch_release runs much faster. Here is a comparison of the total time spent in calls to ublk_ch_release when a server handling 128 devices exits, before and after this change: before: 1.11s after: 0.09s Signed-off-by: Uday Shankar <[email protected]> Reviewed-by: Ming Lei <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]>
1 parent d5dd409 commit 212c928

File tree

1 file changed

+5
-7
lines changed

1 file changed

+5
-7
lines changed

drivers/block/ublk_drv.c

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1389,7 +1389,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
13891389
{
13901390
blk_status_t res;
13911391

1392-
if (unlikely(ubq->fail_io))
1392+
if (unlikely(READ_ONCE(ubq->fail_io)))
13931393
return BLK_STS_TARGET;
13941394

13951395
/* With recovery feature enabled, force_abort is set in
@@ -1401,7 +1401,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
14011401
* Note: force_abort is guaranteed to be seen because it is set
14021402
* before request queue is unqiuesced.
14031403
*/
1404-
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
1404+
if (ublk_nosrv_should_queue_io(ubq) &&
1405+
unlikely(READ_ONCE(ubq->force_abort)))
14051406
return BLK_STS_IOERR;
14061407

14071408
if (check_cancel && unlikely(ubq->canceling))
@@ -1644,16 +1645,14 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
16441645
* Transition the device to the nosrv state. What exactly this
16451646
* means depends on the recovery flags
16461647
*/
1647-
blk_mq_quiesce_queue(disk->queue);
16481648
if (ublk_nosrv_should_stop_dev(ub)) {
16491649
/*
16501650
* Allow any pending/future I/O to pass through quickly
16511651
* with an error. This is needed because del_gendisk
16521652
* waits for all pending I/O to complete
16531653
*/
16541654
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1655-
ublk_get_queue(ub, i)->force_abort = true;
1656-
blk_mq_unquiesce_queue(disk->queue);
1655+
WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);
16571656

16581657
ublk_stop_dev_unlocked(ub);
16591658
} else {
@@ -1663,9 +1662,8 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
16631662
} else {
16641663
ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
16651664
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1666-
ublk_get_queue(ub, i)->fail_io = true;
1665+
WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
16671666
}
1668-
blk_mq_unquiesce_queue(disk->queue);
16691667
}
16701668
unlock:
16711669
mutex_unlock(&ub->mutex);

0 commit comments

Comments
 (0)