Skip to content

Commit a271a89

Browse files
committed
dm mpath: take m->lock spinlock when testing QUEUE_IF_NO_PATH
Fix multipath_end_io, multipath_end_io_bio and multipath_busy to take m->lock while testing if the MPATHF_QUEUE_IF_NO_PATH bit is set. These are all slow-path cases, reached only when no paths are available, so the extra locking isn't a performance hit; correctness matters most.

Signed-off-by: Mike Snitzer <[email protected]>
1 parent 69cea0d commit a271a89

File tree

1 file changed

+34
-18
lines changed

1 file changed

+34
-18
lines changed

drivers/md/dm-mpath.c

Lines changed: 34 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -1621,12 +1621,16 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
16211621
if (pgpath)
16221622
fail_path(pgpath);
16231623

1624-
if (atomic_read(&m->nr_valid_paths) == 0 &&
1625-
!must_push_back_rq(m)) {
1626-
if (error == BLK_STS_IOERR)
1627-
dm_report_EIO(m);
1628-
/* complete with the original error */
1629-
r = DM_ENDIO_DONE;
1624+
if (!atomic_read(&m->nr_valid_paths)) {
1625+
unsigned long flags;
1626+
spin_lock_irqsave(&m->lock, flags);
1627+
if (!must_push_back_rq(m)) {
1628+
if (error == BLK_STS_IOERR)
1629+
dm_report_EIO(m);
1630+
/* complete with the original error */
1631+
r = DM_ENDIO_DONE;
1632+
}
1633+
spin_unlock_irqrestore(&m->lock, flags);
16301634
}
16311635
}
16321636

@@ -1656,15 +1660,19 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
16561660
if (pgpath)
16571661
fail_path(pgpath);
16581662

1659-
if (atomic_read(&m->nr_valid_paths) == 0 &&
1660-
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1661-
if (__must_push_back(m)) {
1662-
r = DM_ENDIO_REQUEUE;
1663-
} else {
1664-
dm_report_EIO(m);
1665-
*error = BLK_STS_IOERR;
1663+
if (!atomic_read(&m->nr_valid_paths)) {
1664+
spin_lock_irqsave(&m->lock, flags);
1665+
if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1666+
if (__must_push_back(m)) {
1667+
r = DM_ENDIO_REQUEUE;
1668+
} else {
1669+
dm_report_EIO(m);
1670+
*error = BLK_STS_IOERR;
1671+
}
1672+
spin_unlock_irqrestore(&m->lock, flags);
1673+
goto done;
16661674
}
1667-
goto done;
1675+
spin_unlock_irqrestore(&m->lock, flags);
16681676
}
16691677

16701678
spin_lock_irqsave(&m->lock, flags);
@@ -1962,10 +1970,11 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
19621970
}
19631971
} else {
19641972
/* No path is available */
1973+
r = -EIO;
1974+
spin_lock_irqsave(&m->lock, flags);
19651975
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
19661976
r = -ENOTCONN;
1967-
else
1968-
r = -EIO;
1977+
spin_unlock_irqrestore(&m->lock, flags);
19691978
}
19701979

19711980
if (r == -ENOTCONN) {
@@ -2036,8 +2045,15 @@ static int multipath_busy(struct dm_target *ti)
20362045
return true;
20372046

20382047
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
2039-
if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
2040-
return (m->queue_mode != DM_TYPE_REQUEST_BASED);
2048+
if (!atomic_read(&m->nr_valid_paths)) {
2049+
unsigned long flags;
2050+
spin_lock_irqsave(&m->lock, flags);
2051+
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
2052+
spin_unlock_irqrestore(&m->lock, flags);
2053+
return (m->queue_mode != DM_TYPE_REQUEST_BASED);
2054+
}
2055+
spin_unlock_irqrestore(&m->lock, flags);
2056+
}
20412057

20422058
/* Guess which priority_group will be used at next mapping time */
20432059
pg = READ_ONCE(m->current_pg);

0 commit comments

Comments (0)