Commit d71369d

Merge tag 'block-6.7-2023-12-08' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:
 "Nothing major in here, just miscellaneous fixes for MD and NVMe:

   - NVMe pull request via Keith:
       - Proper nvme ctrl state setting (Keith)
       - Passthrough command optimization (Keith)
       - Spectre fix (Nitesh)
       - Kconfig clarifications (Shin'ichiro)
       - Frozen state deadlock fix (Bitao)
       - Power setting quirk (Georg)

   - MD pull requests via Song:
       - 6.7 regressions with recovery/sync (Yu)
       - Reshape fix (David)"

* tag 'block-6.7-2023-12-08' of git://git.kernel.dk/linux:
  md: split MD_RECOVERY_NEEDED out of mddev_resume
  nvme-pci: Add sleep quirk for Kingston drives
  md: fix stopping sync thread
  md: don't leave 'MD_RECOVERY_FROZEN' in error path of md_set_readonly()
  md: fix missing flush of sync_work
  nvme: fix deadlock between reset and scan
  nvme: prevent potential spectre v1 gadget
  nvme: improve NVME_HOST_AUTH and NVME_TARGET_AUTH config descriptions
  nvme-ioctl: move capable() admin check to the end
  nvme: ensure reset state check ordering
  nvme: introduce helper function to get ctrl state
  md/raid6: use valid sector values to determine if an I/O should wait on the reshape
2 parents 689659c + c6d3ab9 commit d71369d

12 files changed: +197, -134 lines


drivers/md/md.c

Lines changed: 76 additions & 68 deletions
@@ -490,7 +490,7 @@ int mddev_suspend(struct mddev *mddev, bool interruptible)
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
 
-void mddev_resume(struct mddev *mddev)
+static void __mddev_resume(struct mddev *mddev, bool recovery_needed)
 {
         lockdep_assert_not_held(&mddev->reconfig_mutex);
 
@@ -507,12 +507,18 @@ void mddev_resume(struct mddev *mddev)
         percpu_ref_resurrect(&mddev->active_io);
         wake_up(&mddev->sb_wait);
 
-        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+        if (recovery_needed)
+                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
         md_wakeup_thread(mddev->thread);
         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
         mutex_unlock(&mddev->suspend_mutex);
 }
+
+void mddev_resume(struct mddev *mddev)
+{
+        return __mddev_resume(mddev, true);
+}
 EXPORT_SYMBOL_GPL(mddev_resume);
 
 /*
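This refactor is the classic public-wrapper/internal-worker split: the exported mddev_resume() keeps its old behaviour (always re-arm recovery), while the new internal variant lets MD's own paths resume I/O without setting the flag. As a rough userspace analogue (hypothetical names, a stdatomic flag word standing in for mddev->recovery, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RECOVERY_NEEDED (1u << 0)

    struct array_state {
        atomic_uint recovery;   /* stands in for mddev->recovery bit flags */
    };

    static void wake_worker(struct array_state *st)
    {
        (void)st;               /* stub for md_wakeup_thread() */
    }

    /* Internal variant: resume, optionally re-arming recovery. */
    static void resume_internal(struct array_state *st, bool recovery_needed)
    {
        if (recovery_needed)
            atomic_fetch_or(&st->recovery, RECOVERY_NEEDED);
        wake_worker(st);
    }

    /* Public entry keeps the old behaviour: always re-arm recovery. */
    void resume(struct array_state *st)
    {
        resume_internal(st, true);
    }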
@@ -4840,59 +4846,72 @@ action_show(struct mddev *mddev, char *page)
         return sprintf(page, "%s\n", type);
 }
 
-static void stop_sync_thread(struct mddev *mddev)
+/**
+ * stop_sync_thread() - wait for sync_thread to stop if it's running.
+ * @mddev:      the array.
+ * @locked:     if set, reconfig_mutex will still be held after this function
+ *              returns; if not set, reconfig_mutex will be released after this
+ *              function returns.
+ * @check_seq:  if set, only wait for the current running sync_thread to stop,
+ *              noting that a new sync_thread can still start.
+ */
+static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
 {
-        if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-                return;
+        int sync_seq;
 
-        if (mddev_lock(mddev))
-                return;
+        if (check_seq)
+                sync_seq = atomic_read(&mddev->sync_seq);
 
-        /*
-         * Check again in case MD_RECOVERY_RUNNING is cleared before lock is
-         * held.
-         */
         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
-                mddev_unlock(mddev);
+                if (!locked)
+                        mddev_unlock(mddev);
                 return;
         }
 
-        if (work_pending(&mddev->del_work))
-                flush_workqueue(md_misc_wq);
+        mddev_unlock(mddev);
 
         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
         /*
         * Thread might be blocked waiting for metadata update which will now
         * never happen
         */
         md_wakeup_thread_directly(mddev->sync_thread);
+        if (work_pending(&mddev->sync_work))
+                flush_work(&mddev->sync_work);
 
-        mddev_unlock(mddev);
+        wait_event(resync_wait,
+                   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+                   (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
+
+        if (locked)
+                mddev_lock_nointr(mddev);
 }
 
 static void idle_sync_thread(struct mddev *mddev)
 {
-        int sync_seq = atomic_read(&mddev->sync_seq);
-
         mutex_lock(&mddev->sync_mutex);
         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-        stop_sync_thread(mddev);
 
-        wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
-                   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+        if (mddev_lock(mddev)) {
+                mutex_unlock(&mddev->sync_mutex);
+                return;
+        }
 
+        stop_sync_thread(mddev, false, true);
         mutex_unlock(&mddev->sync_mutex);
 }
 
 static void frozen_sync_thread(struct mddev *mddev)
 {
         mutex_lock(&mddev->sync_mutex);
         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-        stop_sync_thread(mddev);
 
-        wait_event(resync_wait, mddev->sync_thread == NULL &&
-                   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+        if (mddev_lock(mddev)) {
+                mutex_unlock(&mddev->sync_mutex);
+                return;
+        }
 
+        stop_sync_thread(mddev, false, false);
         mutex_unlock(&mddev->sync_mutex);
 }
 
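The check_seq variant encodes a subtle wait condition: the caller only needs the sync run that was in flight on entry to finish, not a sync-free array, so it snapshots a sequence counter and also wakes when that counter moves. A condensed pthread sketch of that condition (illustrative names, not the kernel's wait_event machinery):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t resync_wait = PTHREAD_COND_INITIALIZER;
    static bool sync_running;   /* analogue of MD_RECOVERY_RUNNING */
    static unsigned long seq;   /* bumped each time a sync run finishes */

    /* Wait for the sync thread to stop. With check_seq, return as soon as
     * the run that was active on entry has finished, even if a new one
     * has already started. */
    void wait_sync_stopped(bool check_seq)
    {
        pthread_mutex_lock(&lock);
        unsigned long entry_seq = seq;      /* snapshot before sleeping */

        while (sync_running && !(check_seq && seq != entry_seq))
            pthread_cond_wait(&resync_wait, &lock);
        pthread_mutex_unlock(&lock);
    }

    /* The sync thread's exit path pairs with the wait above. */
    void sync_thread_exit(void)
    {
        pthread_mutex_lock(&lock);
        sync_running = false;
        seq++;
        pthread_cond_broadcast(&resync_wait);
        pthread_mutex_unlock(&lock);
    }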
@@ -6264,14 +6283,7 @@ static void md_clean(struct mddev *mddev)
 
 static void __md_stop_writes(struct mddev *mddev)
 {
-        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-        if (work_pending(&mddev->del_work))
-                flush_workqueue(md_misc_wq);
-        if (mddev->sync_thread) {
-                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                md_reap_sync_thread(mddev);
-        }
-
+        stop_sync_thread(mddev, true, false);
         del_timer_sync(&mddev->safemode_timer);
 
         if (mddev->pers && mddev->pers->quiesce) {
@@ -6355,25 +6367,16 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
         int err = 0;
         int did_freeze = 0;
 
+        if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+                return -EBUSY;
+
         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
                 did_freeze = 1;
                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                 md_wakeup_thread(mddev->thread);
         }
-        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 
-        /*
-         * Thread might be blocked waiting for metadata update which will now
-         * never happen
-         */
-        md_wakeup_thread_directly(mddev->sync_thread);
-
-        if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
-                return -EBUSY;
-        mddev_unlock(mddev);
-        wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
-                                          &mddev->recovery));
+        stop_sync_thread(mddev, false, false);
         wait_event(mddev->sb_wait,
                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
         mddev_lock_nointr(mddev);
@@ -6383,29 +6386,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
             mddev->sync_thread ||
             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
                 pr_warn("md: %s still in use.\n",mdname(mddev));
-                if (did_freeze) {
-                        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                        md_wakeup_thread(mddev->thread);
-                }
                 err = -EBUSY;
                 goto out;
         }
+
         if (mddev->pers) {
                 __md_stop_writes(mddev);
 
-                err  = -ENXIO;
-                if (mddev->ro == MD_RDONLY)
+                if (mddev->ro == MD_RDONLY) {
+                        err = -ENXIO;
                         goto out;
+                }
+
                 mddev->ro = MD_RDONLY;
                 set_disk_ro(mddev->gendisk, 1);
+        }
+
+out:
+        if ((mddev->pers && !err) || did_freeze) {
                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                 md_wakeup_thread(mddev->thread);
                 sysfs_notify_dirent_safe(mddev->sysfs_state);
-                err = 0;
         }
-out:
+
         mutex_unlock(&mddev->open_mutex);
         return err;
 }
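These two hunks fix the commit-message item about MD_RECOVERY_FROZEN leaking on error: the unconditional -EBUSY check now happens before anything is frozen, and every exit funnels through one cleanup label that unfreezes on success or undoes a freeze this function took. A simplified sketch of the resulting shape, with hypothetical helpers standing in for the MD flag accessors:

    #include <errno.h>
    #include <stdbool.h>

    /* Hypothetical stand-ins for the MD state accessors. */
    extern bool change_pending(void);
    extern bool frozen(void);
    extern void set_frozen(bool on);
    extern bool still_in_use(void);
    extern void make_readonly(void);

    int set_readonly(void)
    {
        int err = 0;
        bool did_freeze = false;

        if (change_pending())       /* fail fast: nothing to undo yet */
            return -EBUSY;

        if (!frozen()) {
            did_freeze = true;
            set_frozen(true);
        }

        if (still_in_use()) {
            err = -EBUSY;
            goto out;               /* early exit shares the unfreeze below */
        }

        make_readonly();
    out:
        if (!err || did_freeze)     /* unfreeze on success, or undo our freeze */
            set_frozen(false);
        return err;
    }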
@@ -6426,20 +6430,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                 md_wakeup_thread(mddev->thread);
         }
-        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-
-        /*
-         * Thread might be blocked waiting for metadata update which will now
-         * never happen
-         */
-        md_wakeup_thread_directly(mddev->sync_thread);
 
-        mddev_unlock(mddev);
-        wait_event(resync_wait, (mddev->sync_thread == NULL &&
-                                 !test_bit(MD_RECOVERY_RUNNING,
-                                           &mddev->recovery)));
-        mddev_lock_nointr(mddev);
+        stop_sync_thread(mddev, true, false);
 
         mutex_lock(&mddev->open_mutex);
         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
@@ -9403,7 +9395,15 @@ static void md_start_sync(struct work_struct *ws)
                 goto not_running;
         }
 
-        suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
+        mddev_unlock(mddev);
+        /*
+         * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
+         * not set it again. Otherwise, we may cause an issue like this one:
+         * https://bugzilla.kernel.org/show_bug.cgi?id=218200
+         * Therefore, use __mddev_resume(mddev, false).
+         */
+        if (suspend)
+                __mddev_resume(mddev, false);
         md_wakeup_thread(mddev->sync_thread);
         sysfs_notify_dirent_safe(mddev->sysfs_action);
         md_new_event();
@@ -9415,7 +9415,15 @@ static void md_start_sync(struct work_struct *ws)
         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-        suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
+        mddev_unlock(mddev);
+        /*
+         * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
+         * not set it again. Otherwise, we may cause an issue like this one:
+         * https://bugzilla.kernel.org/show_bug.cgi?id=218200
+         * Therefore, use __mddev_resume(mddev, false).
+         */
+        if (suspend)
+                __mddev_resume(mddev, false);
 
         wake_up(&resync_wait);
         if (test_and_clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
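The comment in both hunks describes a self-retriggering hazard: md_start_sync runs *because* MD_RECOVERY_NEEDED was set, so resuming with the flag re-armed would requeue the work indefinitely. A hypothetical minimal rendering of that feedback loop and its fix (illustrative only, not the MD workqueue code):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool recovery_needed;

    extern void queue_sync_work(void);      /* stub: schedule the worker */

    /* Resume I/O; only external callers re-arm the recovery trigger. */
    void resume(bool set_needed)
    {
        if (set_needed && !atomic_exchange(&recovery_needed, true))
            queue_sync_work();
    }

    /* The worker runs because recovery_needed was set. If its own resume
     * path set the flag again, it would requeue itself forever (compare
     * https://bugzilla.kernel.org/show_bug.cgi?id=218200). */
    void sync_work(void)
    {
        atomic_store(&recovery_needed, false);  /* consume the trigger */
        /* ... spin up the sync thread ... */
        resume(false);                          /* resume without re-arming */
    }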

drivers/md/raid5.c

Lines changed: 2 additions & 2 deletions
@@ -5892,11 +5892,11 @@ static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
         int dd_idx;
 
         for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
-                if (dd_idx == sh->pd_idx)
+                if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
                         continue;
 
                 min_sector = min(min_sector, sh->dev[dd_idx].sector);
-                max_sector = min(max_sector, sh->dev[dd_idx].sector);
+                max_sector = max(max_sector, sh->dev[dd_idx].sector);
         }
 
         spin_lock_irq(&conf->device_lock);
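Two small bugs hide in this hunk: RAID6's second parity device (qd_idx) was not skipped, and max_sector was accumulated with min(), so the upper bound could never grow past its initial value. A standalone C rendering of the corrected loop, with hypothetical types standing in for struct r5dev:

    #include <limits.h>

    struct fake_dev { long sector; };   /* hypothetical stand-in for r5dev */

    /* Track both the lowest and highest data sector, skipping the P and Q
     * parity slots. */
    void data_sector_range(const struct fake_dev *dev, int disks,
                           int pd_idx, int qd_idx,
                           long *min_sector, long *max_sector)
    {
        *min_sector = LONG_MAX;
        *max_sector = LONG_MIN;

        for (int i = 0; i < disks; i++) {
            if (i == pd_idx || i == qd_idx)     /* skip P and Q parity */
                continue;
            if (dev[i].sector < *min_sector)
                *min_sector = dev[i].sector;
            if (dev[i].sector > *max_sector)    /* was min(): stuck low */
                *max_sector = dev[i].sector;
        }
    }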

drivers/nvme/host/Kconfig

Lines changed: 3 additions & 2 deletions
@@ -107,11 +107,12 @@ config NVME_TCP_TLS
           If unsure, say N.
 
 config NVME_HOST_AUTH
-        bool "NVM Express over Fabrics In-Band Authentication"
+        bool "NVMe over Fabrics In-Band Authentication in host side"
         depends on NVME_CORE
         select NVME_AUTH
         help
-          This provides support for NVMe over Fabrics In-Band Authentication.
+          This provides support for NVMe over Fabrics In-Band Authentication in
+          host side.
 
           If unsure, say N.
