Skip to content

Commit a2e94e8

Browse files
committed
Merge tag 'block-6.17-20250822' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe: "A set of fixes for block that should go into this tree. A bit larger than what I usually have at this point in time, a lot of that is the continued fixing of the lockdep annotation for queue freezing that we recently added, which has highlighted a number of little issues here and there. This contains: - MD pull request via Yu: - Add a legacy_async_del_gendisk mode, to prevent a user tools regression. New user tools releases will not use such a mode, the old release with a new kernel now will have warning about deprecated behavior, and we prepare to remove this legacy mode after about a year later - The rename in kernel causing user tools build failure, revert the rename in mdp_superblock_s - Fix a regression that interrupted resync can be shown as recover from mdstat or sysfs - Improve file size detection for loop, particularly for networked file systems, by using getattr to get the size rather than the cached inode size. - Hotplug CPU lock vs queue freeze fix - Lockdep fix while updating the number of hardware queues - Fix stacking for PI devices - Silence bio_check_eod() for the known case of device removal where the size is truncated to 0 sectors" * tag 'block-6.17-20250822' of git://git.kernel.dk/linux: block: avoid cpu_hotplug_lock dependency on freeze_lock block: decrement block_rq_qos static key in rq_qos_del() block: skip q->rq_qos check in rq_qos_done_bio() blk-mq: fix lockdep warning in __blk_mq_update_nr_hw_queues block: tone down bio_check_eod loop: use vfs_getattr_nosec for accurate file size loop: Consolidate size calculation logic into lo_calculate_size() block: remove newlines from the warnings in blk_validate_integrity_limits block: handle pi_tuple_size in queue_limits_stack_integrity selftests: ublk: Use ARRAY_SIZE() macro to improve code md: fix sync_action incorrect display during resync md: add helper rdev_needs_recovery() md: keep recovery_cp in mdp_superblock_s md: add legacy_async_del_gendisk mode
2 parents d28de4f + 370ac28 commit a2e94e8

File tree

11 files changed

+169
-83
lines changed

11 files changed

+169
-83
lines changed

block/blk-core.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -557,7 +557,7 @@ static inline int bio_check_eod(struct bio *bio)
557557
sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
558558
unsigned int nr_sectors = bio_sectors(bio);
559559

560-
if (nr_sectors &&
560+
if (nr_sectors && maxsector &&
561561
(nr_sectors > maxsector ||
562562
bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
563563
pr_info_ratelimited("%s: attempt to access beyond end of device\n"

block/blk-mq-debugfs.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,7 @@ static const char *const blk_queue_flag_name[] = {
9595
QUEUE_FLAG_NAME(SQ_SCHED),
9696
QUEUE_FLAG_NAME(DISABLE_WBT_DEF),
9797
QUEUE_FLAG_NAME(NO_ELV_SWITCH),
98+
QUEUE_FLAG_NAME(QOS_ENABLED),
9899
};
99100
#undef QUEUE_FLAG_NAME
100101

block/blk-mq.c

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5033,6 +5033,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
50335033
unsigned int memflags;
50345034
int i;
50355035
struct xarray elv_tbl, et_tbl;
5036+
bool queues_frozen = false;
50365037

50375038
lockdep_assert_held(&set->tag_list_lock);
50385039

@@ -5056,9 +5057,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
50565057
blk_mq_sysfs_unregister_hctxs(q);
50575058
}
50585059

5059-
list_for_each_entry(q, &set->tag_list, tag_set_list)
5060-
blk_mq_freeze_queue_nomemsave(q);
5061-
50625060
/*
50635061
* Switch IO scheduler to 'none', cleaning up the data associated
50645062
* with the previous scheduler. We will switch back once we are done
@@ -5068,6 +5066,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
50685066
if (blk_mq_elv_switch_none(q, &elv_tbl))
50695067
goto switch_back;
50705068

5069+
list_for_each_entry(q, &set->tag_list, tag_set_list)
5070+
blk_mq_freeze_queue_nomemsave(q);
5071+
queues_frozen = true;
50715072
if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
50725073
goto switch_back;
50735074

@@ -5091,8 +5092,12 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
50915092
}
50925093
switch_back:
50935094
/* The blk_mq_elv_switch_back unfreezes queue for us. */
5094-
list_for_each_entry(q, &set->tag_list, tag_set_list)
5095+
list_for_each_entry(q, &set->tag_list, tag_set_list) {
5096+
/* switch_back expects queue to be frozen */
5097+
if (!queues_frozen)
5098+
blk_mq_freeze_queue_nomemsave(q);
50955099
blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);
5100+
}
50965101

50975102
list_for_each_entry(q, &set->tag_list, tag_set_list) {
50985103
blk_mq_sysfs_register_hctxs(q);

block/blk-rq-qos.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,6 @@
22

33
#include "blk-rq-qos.h"
44

5-
__read_mostly DEFINE_STATIC_KEY_FALSE(block_rq_qos);
6-
75
/*
86
* Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
97
* false if 'v' + 1 would be bigger than 'below'.
@@ -319,8 +317,8 @@ void rq_qos_exit(struct request_queue *q)
319317
struct rq_qos *rqos = q->rq_qos;
320318
q->rq_qos = rqos->next;
321319
rqos->ops->exit(rqos);
322-
static_branch_dec(&block_rq_qos);
323320
}
321+
blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
324322
mutex_unlock(&q->rq_qos_mutex);
325323
}
326324

@@ -346,7 +344,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
346344
goto ebusy;
347345
rqos->next = q->rq_qos;
348346
q->rq_qos = rqos;
349-
static_branch_inc(&block_rq_qos);
347+
blk_queue_flag_set(QUEUE_FLAG_QOS_ENABLED, q);
350348

351349
blk_mq_unfreeze_queue(q, memflags);
352350

@@ -377,6 +375,8 @@ void rq_qos_del(struct rq_qos *rqos)
377375
break;
378376
}
379377
}
378+
if (!q->rq_qos)
379+
blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
380380
blk_mq_unfreeze_queue(q, memflags);
381381

382382
mutex_lock(&q->debugfs_mutex);

block/blk-rq-qos.h

Lines changed: 31 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
#include "blk-mq-debugfs.h"
1313

1414
struct blk_mq_debugfs_attr;
15-
extern struct static_key_false block_rq_qos;
1615

1716
enum rq_qos_id {
1817
RQ_QOS_WBT,
@@ -113,43 +112,55 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
113112

114113
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
115114
{
116-
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
115+
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
116+
q->rq_qos)
117117
__rq_qos_cleanup(q->rq_qos, bio);
118118
}
119119

120120
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
121121
{
122-
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos &&
123-
!blk_rq_is_passthrough(rq))
122+
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
123+
q->rq_qos && !blk_rq_is_passthrough(rq))
124124
__rq_qos_done(q->rq_qos, rq);
125125
}
126126

127127
static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
128128
{
129-
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
129+
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
130+
q->rq_qos)
130131
__rq_qos_issue(q->rq_qos, rq);
131132
}
132133

133134
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
134135
{
135-
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
136+
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
137+
q->rq_qos)
136138
__rq_qos_requeue(q->rq_qos, rq);
137139
}
138140

139141
static inline void rq_qos_done_bio(struct bio *bio)
140142
{
141-
if (static_branch_unlikely(&block_rq_qos) &&
142-
bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
143-
bio_flagged(bio, BIO_QOS_MERGED))) {
144-
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
145-
if (q->rq_qos)
146-
__rq_qos_done_bio(q->rq_qos, bio);
147-
}
143+
struct request_queue *q;
144+
145+
if (!bio->bi_bdev || (!bio_flagged(bio, BIO_QOS_THROTTLED) &&
146+
!bio_flagged(bio, BIO_QOS_MERGED)))
147+
return;
148+
149+
q = bdev_get_queue(bio->bi_bdev);
150+
151+
/*
152+
* If a bio has BIO_QOS_xxx set, it implicitly implies that
153+
* q->rq_qos is present. So, we skip re-checking q->rq_qos
154+
* here as an extra optimization and directly call
155+
* __rq_qos_done_bio().
156+
*/
157+
__rq_qos_done_bio(q->rq_qos, bio);
148158
}
149159

150160
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
151161
{
152-
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
162+
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
163+
q->rq_qos) {
153164
bio_set_flag(bio, BIO_QOS_THROTTLED);
154165
__rq_qos_throttle(q->rq_qos, bio);
155166
}
@@ -158,22 +169,25 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
158169
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
159170
struct bio *bio)
160171
{
161-
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
172+
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
173+
q->rq_qos)
162174
__rq_qos_track(q->rq_qos, rq, bio);
163175
}
164176

165177
static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
166178
struct bio *bio)
167179
{
168-
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
180+
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
181+
q->rq_qos) {
169182
bio_set_flag(bio, BIO_QOS_MERGED);
170183
__rq_qos_merge(q->rq_qos, rq, bio);
171184
}
172185
}
173186

174187
static inline void rq_qos_queue_depth_changed(struct request_queue *q)
175188
{
176-
if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
189+
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
190+
q->rq_qos)
177191
__rq_qos_queue_depth_changed(q->rq_qos);
178192
}
179193

block/blk-settings.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -157,25 +157,22 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
157157
switch (bi->csum_type) {
158158
case BLK_INTEGRITY_CSUM_NONE:
159159
if (bi->pi_tuple_size) {
160-
pr_warn("pi_tuple_size must be 0 when checksum type \
161-
is none\n");
160+
pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
162161
return -EINVAL;
163162
}
164163
break;
165164
case BLK_INTEGRITY_CSUM_CRC:
166165
case BLK_INTEGRITY_CSUM_IP:
167166
if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
168-
pr_warn("pi_tuple_size mismatch for T10 PI: expected \
169-
%zu, got %u\n",
167+
pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
170168
sizeof(struct t10_pi_tuple),
171169
bi->pi_tuple_size);
172170
return -EINVAL;
173171
}
174172
break;
175173
case BLK_INTEGRITY_CSUM_CRC64:
176174
if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
177-
pr_warn("pi_tuple_size mismatch for CRC64 PI: \
178-
expected %zu, got %u\n",
175+
pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
179176
sizeof(struct crc64_pi_tuple),
180177
bi->pi_tuple_size);
181178
return -EINVAL;
@@ -972,6 +969,8 @@ bool queue_limits_stack_integrity(struct queue_limits *t,
972969
goto incompatible;
973970
if (ti->csum_type != bi->csum_type)
974971
goto incompatible;
972+
if (ti->pi_tuple_size != bi->pi_tuple_size)
973+
goto incompatible;
975974
if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
976975
(bi->flags & BLK_INTEGRITY_REF_TAG))
977976
goto incompatible;
@@ -980,6 +979,7 @@ bool queue_limits_stack_integrity(struct queue_limits *t,
980979
ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
981980
(bi->flags & BLK_INTEGRITY_REF_TAG);
982981
ti->csum_type = bi->csum_type;
982+
ti->pi_tuple_size = bi->pi_tuple_size;
983983
ti->metadata_size = bi->metadata_size;
984984
ti->pi_offset = bi->pi_offset;
985985
ti->interval_exp = bi->interval_exp;

drivers/block/loop.c

Lines changed: 21 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -137,32 +137,36 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
137137
static int max_part;
138138
static int part_shift;
139139

140-
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
140+
static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
141141
{
142+
struct kstat stat;
142143
loff_t loopsize;
144+
int ret;
143145

144-
/* Compute loopsize in bytes */
145-
loopsize = i_size_read(file->f_mapping->host);
146-
if (offset > 0)
147-
loopsize -= offset;
146+
/*
147+
* Get the accurate file size. This provides better results than
148+
* cached inode data, particularly for network filesystems where
149+
* metadata may be stale.
150+
*/
151+
ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
152+
if (ret)
153+
return 0;
154+
155+
loopsize = stat.size;
156+
if (lo->lo_offset > 0)
157+
loopsize -= lo->lo_offset;
148158
/* offset is beyond i_size, weird but possible */
149159
if (loopsize < 0)
150160
return 0;
151-
152-
if (sizelimit > 0 && sizelimit < loopsize)
153-
loopsize = sizelimit;
161+
if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
162+
loopsize = lo->lo_sizelimit;
154163
/*
155164
* Unfortunately, if we want to do I/O on the device,
156165
* the number of 512-byte sectors has to fit into a sector_t.
157166
*/
158167
return loopsize >> 9;
159168
}
160169

161-
static loff_t get_loop_size(struct loop_device *lo, struct file *file)
162-
{
163-
return get_size(lo->lo_offset, lo->lo_sizelimit, file);
164-
}
165-
166170
/*
167171
* We support direct I/O only if lo_offset is aligned with the logical I/O size
168172
* of backing device, and the logical block size of loop is bigger than that of
@@ -569,7 +573,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
569573
error = -EINVAL;
570574

571575
/* size of the new backing store needs to be the same */
572-
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
576+
if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file))
573577
goto out_err;
574578

575579
/*
@@ -1063,7 +1067,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
10631067
loop_update_dio(lo);
10641068
loop_sysfs_init(lo);
10651069

1066-
size = get_loop_size(lo, file);
1070+
size = lo_calculate_size(lo, file);
10671071
loop_set_size(lo, size);
10681072

10691073
/* Order wrt reading lo_state in loop_validate_file(). */
@@ -1255,8 +1259,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
12551259
if (partscan)
12561260
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
12571261
if (!err && size_changed) {
1258-
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
1259-
lo->lo_backing_file);
1262+
loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file);
12601263
loop_set_size(lo, new_size);
12611264
}
12621265
out_unlock:
@@ -1399,7 +1402,7 @@ static int loop_set_capacity(struct loop_device *lo)
13991402
if (unlikely(lo->lo_state != Lo_bound))
14001403
return -ENXIO;
14011404

1402-
size = get_loop_size(lo, lo->lo_backing_file);
1405+
size = lo_calculate_size(lo, lo->lo_backing_file);
14031406
loop_set_size(lo, size);
14041407

14051408
return 0;

0 commit comments

Comments
 (0)