Commit 462abc9

Merge tag 'block-5.19-2022-06-16' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - NVMe pull request from Christoph:
     - Quirks, quirks, quirks to work around buggy consumer grade
       devices (Keith Busch, Ning Wang, Stefan Reiter, Rasheed Hsueh)
     - Better kernel messages for devices that need quirking (Keith Busch)
     - Make a kernel message more useful (Thomas Weißschuh)

 - MD pull request from Song, with a few fixes

 - blk-mq sysfs locking fixes (Ming)

 - BFQ stats fix (Bart)

 - blk-mq offline queue fix (Bart)

 - blk-mq flush request tag fix (Ming)

* tag 'block-5.19-2022-06-16' of git://git.kernel.dk/linux-block:
  block/bfq: Enable I/O statistics
  blk-mq: don't clear flush_rq from tags->rqs[]
  blk-mq: avoid to touch q->elevator without any protection
  blk-mq: protect q->elevator by ->sysfs_lock in blk_mq_elv_switch_none
  block: Fix handling of offline queues in blk_mq_alloc_request_hctx()
  md/raid5-ppl: Fix argument order in bio_alloc_bioset()
  Revert "md: don't unregister sync_thread with reconfig_mutex held"
  nvme-pci: disable write zeros support on UMIC and Samsung SSDs
  nvme-pci: avoid the deepest sleep state on ZHITAI TiPro7000 SSDs
  nvme-pci: sk hynix p31 has bogus namespace ids
  nvme-pci: smi has bogus namespace ids
  nvme-pci: phison e12 has bogus namespace ids
  nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA XPG GAMMIX S50
  nvme-pci: add trouble shooting steps for timeouts
  nvme: add bug report info for global duplicate id
  nvme: add device name to warning in uuid_show()
2 parents f8e174c + b96f3ca

13 files changed (+106, -38 lines)

block/bfq-iosched.c

Lines changed: 6 additions & 0 deletions
@@ -7046,6 +7046,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
         spin_unlock_irq(&bfqd->lock);
 #endif

+        blk_stat_disable_accounting(bfqd->queue);
         wbt_enable_default(bfqd->queue);

         kfree(bfqd);
@@ -7188,7 +7189,12 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
         bfq_init_root_group(bfqd->root_group, bfqd);
         bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);

+        /* We dispatch from request queue wide instead of hw queue */
+        blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
+
         wbt_disable_default(q);
+        blk_stat_enable_accounting(q);
+
         return 0;

 out_free:
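
Note: these BFQ hunks (and the mq-deadline and kyber hunks below) replace the old per-elevator ELEVATOR_F_MQ_AWARE feature test with a per-queue flag. The flag and its test helper live in include/linux/blkdev.h, one of the changed files not rendered on this page. A minimal sketch of the plumbing the hunks assume, per mainline v5.19 (the bit value is illustrative, not quoted from this diff):

/* Sketch, not part of the rendered diff: */
#define QUEUE_FLAG_SQ_SCHED     30      /* single queue style io dispatch */

/* True when the attached elevator dispatches queue-wide, so
 * blk_mq_run_hw_queues() picks one hctx instead of running all of them. */
#define blk_queue_sq_sched(q)   test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)

Keeping this state on the queue, rather than reading q->elevator->type->elevator_features, means callers no longer dereference q->elevator without locking, which is what the blk-mq.c changes below rely on.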

block/blk-mq-sched.c

Lines changed: 1 addition & 0 deletions
@@ -564,6 +564,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
         int ret;

         if (!e) {
+                blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
                 q->elevator = NULL;
                 q->nr_requests = q->tag_set->queue_depth;
                 return 0;

block/blk-mq.c

Lines changed: 10 additions & 19 deletions
@@ -579,6 +579,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
         if (!blk_mq_hw_queue_mapped(data.hctx))
                 goto out_queue_exit;
         cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
+        if (cpu >= nr_cpu_ids)
+                goto out_queue_exit;
         data.ctx = __blk_mq_get_ctx(q, cpu);

         if (!q->elevator)
@@ -2140,20 +2142,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);

-/*
- * Is the request queue handled by an IO scheduler that does not respect
- * hardware queues when dispatching?
- */
-static bool blk_mq_has_sqsched(struct request_queue *q)
-{
-        struct elevator_queue *e = q->elevator;
-
-        if (e && e->type->ops.dispatch_request &&
-            !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
-                return true;
-        return false;
-}
-
 /*
  * Return prefered queue to dispatch from (if any) for non-mq aware IO
  * scheduler.
@@ -2186,7 +2174,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
         unsigned long i;

         sq_hctx = NULL;
-        if (blk_mq_has_sqsched(q))
+        if (blk_queue_sq_sched(q))
                 sq_hctx = blk_mq_get_sq_hctx(q);
         queue_for_each_hw_ctx(q, hctx, i) {
                 if (blk_mq_hctx_stopped(hctx))
@@ -2214,7 +2202,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
         unsigned long i;

         sq_hctx = NULL;
-        if (blk_mq_has_sqsched(q))
+        if (blk_queue_sq_sched(q))
                 sq_hctx = blk_mq_get_sq_hctx(q);
         queue_for_each_hw_ctx(q, hctx, i) {
                 if (blk_mq_hctx_stopped(hctx))
@@ -3443,8 +3431,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
         if (blk_mq_hw_queue_mapped(hctx))
                 blk_mq_tag_idle(hctx);

-        blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
-                        set->queue_depth, flush_rq);
+        if (blk_queue_init_done(q))
+                blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+                                set->queue_depth, flush_rq);
         if (set->ops->exit_request)
                 set->ops->exit_request(set, flush_rq, hctx_idx);

@@ -4438,12 +4427,14 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
         if (!qe)
                 return false;

+        /* q->elevator needs protection from ->sysfs_lock */
+        mutex_lock(&q->sysfs_lock);
+
         INIT_LIST_HEAD(&qe->node);
         qe->q = q;
         qe->type = q->elevator->type;
         list_add(&qe->node, head);

-        mutex_lock(&q->sysfs_lock);
         /*
          * After elevator_switch_mq, the previous elevator_queue will be
          * released by elevator_release. The reference of the io scheduler
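
Note: the offline-queue fix above relies on a standard cpumask idiom: cpumask_first_and() returns a value >= nr_cpu_ids when the two masks have an empty intersection. A small self-contained illustration (pick_online_cpu() is a hypothetical helper, not part of the patch):

#include <linux/cpumask.h>
#include <linux/errno.h>

static int pick_online_cpu(const struct cpumask *hctx_mask)
{
        unsigned int cpu = cpumask_first_and(hctx_mask, cpu_online_mask);

        /* No CPU mapped to this hctx is online; bail out like the hunk does. */
        return cpu < nr_cpu_ids ? (int)cpu : -ENODEV;
}

The blk_mq_elv_switch_none() hunk, by contrast, is purely an ordering fix: q->elevator is now dereferenced only after ->sysfs_lock is taken, closing the window named in the commit message.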

block/kyber-iosched.c

Lines changed: 2 additions & 1 deletion
@@ -421,6 +421,8 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)

         blk_stat_enable_accounting(q);

+        blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
+
         eq->elevator_data = kqd;
         q->elevator = eq;

@@ -1033,7 +1035,6 @@ static struct elevator_type kyber_sched = {
 #endif
         .elevator_attrs = kyber_sched_attrs,
         .elevator_name = "kyber",
-        .elevator_features = ELEVATOR_F_MQ_AWARE,
         .elevator_owner = THIS_MODULE,
 };

block/mq-deadline.c

Lines changed: 3 additions & 0 deletions
@@ -642,6 +642,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
         spin_lock_init(&dd->lock);
         spin_lock_init(&dd->zone_lock);

+        /* We dispatch from request queue wide instead of hw queue */
+        blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
+
         q->elevator = eq;
         return 0;

drivers/md/dm-raid.c

Lines changed: 1 addition & 1 deletion
@@ -3725,7 +3725,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
         if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
                 if (mddev->sync_thread) {
                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                        md_reap_sync_thread(mddev, false);
+                        md_reap_sync_thread(mddev);
                 }
         } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
                 return -EBUSY;

drivers/md/md.c

Lines changed: 5 additions & 9 deletions
@@ -4831,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
                 flush_workqueue(md_misc_wq);
                 if (mddev->sync_thread) {
                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                        md_reap_sync_thread(mddev, true);
+                        md_reap_sync_thread(mddev);
                 }
                 mddev_unlock(mddev);
         }
@@ -6197,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev)
         flush_workqueue(md_misc_wq);
         if (mddev->sync_thread) {
                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                md_reap_sync_thread(mddev, true);
+                md_reap_sync_thread(mddev);
         }

         del_timer_sync(&mddev->safemode_timer);
@@ -9303,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev)
                          * ->spare_active and clear saved_raid_disk
                          */
                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                        md_reap_sync_thread(mddev, true);
+                        md_reap_sync_thread(mddev);
                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9338,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev)
                         goto unlock;
                 }
                 if (mddev->sync_thread) {
-                        md_reap_sync_thread(mddev, true);
+                        md_reap_sync_thread(mddev);
                         goto unlock;
                 }
                 /* Set RUNNING before clearing NEEDED to avoid
@@ -9411,18 +9411,14 @@ void md_check_recovery(struct mddev *mddev)
 }
 EXPORT_SYMBOL(md_check_recovery);

-void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held)
+void md_reap_sync_thread(struct mddev *mddev)
 {
         struct md_rdev *rdev;
         sector_t old_dev_sectors = mddev->dev_sectors;
         bool is_reshaped = false;

-        if (reconfig_mutex_held)
-                mddev_unlock(mddev);
         /* resync has finished, collect result */
         md_unregister_thread(&mddev->sync_thread);
-        if (reconfig_mutex_held)
-                mddev_lock_nointr(mddev);
         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
             mddev->degraded != mddev->raid_disks) {
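
Note: with the revert, md_reap_sync_thread() is again entered with mddev->reconfig_mutex held and no longer drops and retakes it internally. A schematic caller mirroring the restored call sites above (stop_sync_thread_example() is illustrative only, not part of the patch):

static void stop_sync_thread_example(struct mddev *mddev)
{
        if (mddev_lock(mddev))          /* takes mddev->reconfig_mutex */
                return;
        if (mddev->sync_thread) {
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                md_reap_sync_thread(mddev);     /* no unlock/relock inside */
        }
        mddev_unlock(mddev);
}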

drivers/md/md.h

Lines changed: 1 addition & 1 deletion
@@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread(
 extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
-extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held);
+extern void md_reap_sync_thread(struct mddev *mddev);
 extern int mddev_init_writes_pending(struct mddev *mddev);
 extern bool md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);

drivers/md/raid5-ppl.c

Lines changed: 2 additions & 2 deletions
@@ -629,9 +629,9 @@ static void ppl_do_flush(struct ppl_io_unit *io)
                 if (bdev) {
                         struct bio *bio;

-                        bio = bio_alloc_bioset(bdev, 0, GFP_NOIO,
+                        bio = bio_alloc_bioset(bdev, 0,
                                                REQ_OP_WRITE | REQ_PREFLUSH,
-                                               &ppl_conf->flush_bs);
+                                               GFP_NOIO, &ppl_conf->flush_bs);
                         bio->bi_private = io;
                         bio->bi_end_io = ppl_flush_endio;
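
Note: the swapped arguments compiled silently because the operation flags and the allocation mask are both plain integers to the C compiler (gfp_t carries only a sparse __bitwise annotation). For reference, the v5.19 parameter order the fixed call now matches, as implied by the corrected hunk itself:

/* opf (REQ_OP_* flags) comes before gfp_mask: */
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
                             unsigned int opf, gfp_t gfp_mask,
                             struct bio_set *bs);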

drivers/nvme/host/core.c

Lines changed: 3 additions & 2 deletions
@@ -3285,8 +3285,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
          * we have no UUID set
          */
         if (uuid_is_null(&ids->uuid)) {
-                printk_ratelimited(KERN_WARNING
-                                   "No UUID available providing old NGUID\n");
+                dev_warn_ratelimited(dev,
+                        "No UUID available providing old NGUID\n");
                 return sysfs_emit(buf, "%pU\n", ids->nguid);
         }
         return sysfs_emit(buf, "%pU\n", &ids->uuid);
@@ -3863,6 +3863,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
         if (ret) {
                 dev_err(ctrl->device,
                         "globally duplicate IDs for nsid %d\n", nsid);
+                nvme_print_device_info(ctrl);
                 return ret;
         }
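
Note: both NVMe hunks make the warnings actionable. dev_warn_ratelimited() prefixes the message with the emitting device's driver string and name, where bare printk_ratelimited() gave no hint which controller was complaining; nvme_print_device_info(), per the commit "nvme: add bug report info for global duplicate id", is implemented in the NVMe driver files not rendered on this page and prints identifying controller details next to the duplicate-ID error. A minimal usage sketch (warn_missing_uuid() is hypothetical):

#include <linux/dev_printk.h>

static void warn_missing_uuid(struct device *dev)
{
        /* Logged with a "<driver> <devname>:" prefix, unlike printk(). */
        dev_warn_ratelimited(dev, "No UUID available providing old NGUID\n");
}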
