Commit ded080c

rbd: don't move requests to the running list on errors
The running list is supposed to contain requests that are pinning the exclusive lock, i.e. those that must be flushed before exclusive lock is released.  When wake_lock_waiters() is called to handle an error, requests on the acquiring list are failed with that error and no flushing takes place.  Briefly moving them to the running list is not only pointless but also harmful: if exclusive lock gets acquired before all of their state machines are scheduled and go through rbd_lock_del_request(), we trigger rbd_assert(list_empty(&rbd_dev->running_list)); in rbd_try_acquire_lock().

Cc: [email protected]
Fixes: 637cd06 ("rbd: new exclusive lock wait/wake code")
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Dongsheng Yang <[email protected]>
1 parent cd30e8b commit ded080c
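
To see why the old error path is harmful, it helps to play the sequence out: wake_lock_waiters() fails every request on the acquiring list, parks them on the running list anyway, and if the exclusive lock is then acquired before each request's state machine has run rbd_lock_del_request(), the running list is still non-empty at the point where rbd_try_acquire_lock() asserts that it must be empty. The userspace sketch below models just that invariant; the list helpers, wake_lock_waiters_old()/wake_lock_waiters_new() and try_acquire_lock() are simplified stand-ins written for this note, not the kernel code:

/*
 * Userspace sketch (illustrative only, not kernel code): the list
 * helpers below are minimal stand-ins for <linux/list.h>, and the two
 * wake_lock_waiters_*() variants model the behaviour before and after
 * this commit.
 */
#include <assert.h>
#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->prev = entry->next = entry;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static struct list_head acquiring_list = LIST_HEAD_INIT(acquiring_list);
static struct list_head running_list = LIST_HEAD_INIT(running_list);

/* Old behaviour: even requests failed with an error are parked on the
 * running list until each one's state machine removes itself. */
static void wake_lock_waiters_old(int result)
{
	(void)result;
	while (!list_empty(&acquiring_list)) {
		struct list_head *req = acquiring_list.next;

		list_del_init(req);
		list_add_tail(req, &running_list);
	}
}

/* New behaviour: on error the request is simply dropped from the list,
 * on success it is moved to the running list as before. */
static void wake_lock_waiters_new(int result)
{
	while (!list_empty(&acquiring_list)) {
		struct list_head *req = acquiring_list.next;

		list_del_init(req);
		if (!result)
			list_add_tail(req, &running_list);
	}
}

/* Stand-in for the invariant rbd_try_acquire_lock() asserts. */
static void try_acquire_lock(void)
{
	assert(list_empty(&running_list));
	printf("lock acquired, running list empty as expected\n");
}

int main(int argc, char **argv)
{
	struct list_head req = LIST_HEAD_INIT(req);

	list_add_tail(&req, &acquiring_list);

	/* Fail the waiter with an error; any command-line argument
	 * selects the old behaviour, and the assert above then fires
	 * because the failed request is still on the running list. */
	if (argc > 1)
		wake_lock_waiters_old(-1);
	else
		wake_lock_waiters_new(-1);

	try_acquire_lock();
	return 0;
}

Running the sketch with no arguments exercises the fixed behaviour; passing any argument switches to the old splice-style behaviour and the assert fires, mirroring the rbd_assert() described above.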

File tree

1 file changed: 14 additions, 8 deletions

drivers/block/rbd.c

Lines changed: 14 additions & 8 deletions
@@ -3452,14 +3452,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
 static void rbd_lock_del_request(struct rbd_img_request *img_req)
 {
         struct rbd_device *rbd_dev = img_req->rbd_dev;
-        bool need_wakeup;
+        bool need_wakeup = false;
 
         lockdep_assert_held(&rbd_dev->lock_rwsem);
         spin_lock(&rbd_dev->lock_lists_lock);
-        rbd_assert(!list_empty(&img_req->lock_item));
-        list_del_init(&img_req->lock_item);
-        need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
-                       list_empty(&rbd_dev->running_list));
+        if (!list_empty(&img_req->lock_item)) {
+                list_del_init(&img_req->lock_item);
+                need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+                               list_empty(&rbd_dev->running_list));
+        }
         spin_unlock(&rbd_dev->lock_lists_lock);
         if (need_wakeup)
                 complete(&rbd_dev->releasing_wait);
@@ -3842,14 +3843,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
                 return;
         }
 
-        list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
+        while (!list_empty(&rbd_dev->acquiring_list)) {
+                img_req = list_first_entry(&rbd_dev->acquiring_list,
+                                           struct rbd_img_request, lock_item);
                 mutex_lock(&img_req->state_mutex);
                 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
+                if (!result)
+                        list_move_tail(&img_req->lock_item,
+                                       &rbd_dev->running_list);
+                else
+                        list_del_init(&img_req->lock_item);
                 rbd_img_schedule(img_req, result);
                 mutex_unlock(&img_req->state_mutex);
         }
-
-        list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
 }
 
 static bool locker_equal(const struct ceph_locker *lhs,
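
A note on the loop change in wake_lock_waiters(): the original code could walk the acquiring list with list_for_each_entry() because entries stayed in place until the single list_splice_tail_init() at the end; now that each request is either moved to the running list or unlinked inside the loop body, the fix drains the list instead, repeatedly taking list_first_entry() while the list is non-empty.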
