
Commit c55ddd9

Christoph Hellwig authored and axboe committed
block: pass struct queue_limits to the bio splitting helpers
Allow using the splitting helpers on just a queue_limits instead of a full
request_queue structure. This will eventually allow file systems or remapping
drivers to split REQ_OP_ZONE_APPEND bios based on limits calculated as the
minimum common capabilities over multiple devices.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Damien Le Moal <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
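
To make the intended usage concrete, below is a minimal sketch of how a remapping driver might one day derive the minimum common limits of its member devices and split a bio against them. blk_set_stacking_limits(), blk_stack_limits(), get_start_sect() and the __bio_split_to_limits() signature introduced by this commit are existing kernel interfaces; struct my_stacked_dev, its fields, and the assumption that a driver can reach __bio_split_to_limits() (it is still block-layer internal at this point) are purely illustrative.

/*
 * Hedged sketch only: illustrates the calling pattern this commit enables.
 * Assumes <linux/blkdev.h> plus block-layer internals; "my_stacked_dev"
 * and its fields are hypothetical.
 */
struct my_stacked_dev {
        struct block_device *devs[4];   /* member devices */
        int nr_devs;
};

static struct bio *my_split_to_common_limits(struct my_stacked_dev *sd,
                                             struct bio *bio)
{
        struct queue_limits lim;
        unsigned int nr_segs;
        int i;

        /* start from the most permissive stacking defaults ... */
        blk_set_stacking_limits(&lim);

        /* ... and narrow them to the minimum common capabilities
         * of all member devices (return value ignored in this sketch) */
        for (i = 0; i < sd->nr_devs; i++)
                blk_stack_limits(&lim,
                                 &bdev_get_queue(sd->devs[i])->limits,
                                 get_start_sect(sd->devs[i]));

        /* split @bio so that the front part fits the combined limits */
        return __bio_split_to_limits(bio, &lim, &nr_segs);
}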
1 parent b6dc619 commit c55ddd9

File tree: 5 files changed, 68 insertions(+), 72 deletions(-)

block/bio-integrity.c

Lines changed: 1 addition & 1 deletion

@@ -134,7 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
 	iv = bip->bip_vec + bip->bip_vcnt;
 
 	if (bip->bip_vcnt &&
-	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+	    bvec_gap_to_prev(&bdev_get_queue(bio->bi_bdev)->limits,
			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
 		return 0;
 

block/bio.c

Lines changed: 1 addition & 1 deletion

@@ -965,7 +965,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
-		if (bvec_gap_to_prev(q, bvec, offset))
+		if (bvec_gap_to_prev(&q->limits, bvec, offset))
			return 0;
	}
 

block/blk-merge.c

Lines changed: 50 additions & 57 deletions

@@ -82,7 +82,7 @@ static inline bool bio_will_gap(struct request_queue *q,
 	bio_get_first_bvec(next, &nb);
 	if (biovec_phys_mergeable(q, &pb, &nb))
 		return false;
-	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
 }
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
@@ -100,26 +100,25 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
  * is defined as 'unsigned int', meantime it has to be aligned to with the
  * logical block size, which is the minimum accepted unit by hardware.
  */
-static unsigned int bio_allowed_max_sectors(struct request_queue *q)
+static unsigned int bio_allowed_max_sectors(struct queue_limits *lim)
 {
-	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
 }
 
-static struct bio *bio_split_discard(struct bio *bio, struct request_queue *q,
+static struct bio *bio_split_discard(struct bio *bio, struct queue_limits *lim,
		unsigned *nsegs, struct bio_set *bs)
 {
 	unsigned int max_discard_sectors, granularity;
-	int alignment;
 	sector_t tmp;
 	unsigned split_sectors;
 
 	*nsegs = 1;
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
+	granularity = max(lim->discard_granularity >> 9, 1U);
 
-	max_discard_sectors = min(q->limits.max_discard_sectors,
-			bio_allowed_max_sectors(q));
+	max_discard_sectors =
+		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
 	max_discard_sectors -= max_discard_sectors % granularity;
 
 	if (unlikely(!max_discard_sectors)) {
@@ -136,9 +135,8 @@ static struct bio *bio_split_discard(struct bio *bio, struct request_queue *q,
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
-	alignment = (q->limits.discard_alignment >> 9) % granularity;
-
-	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+	tmp = bio->bi_iter.bi_sector + split_sectors -
+		((lim->discard_alignment >> 9) % granularity);
 	tmp = sector_div(tmp, granularity);
 
 	if (split_sectors > tmp)
@@ -148,17 +146,14 @@ static struct bio *bio_split_discard(struct bio *bio, struct request_queue *q,
 }
 
 static struct bio *bio_split_write_zeroes(struct bio *bio,
-		struct request_queue *q, unsigned *nsegs, struct bio_set *bs)
+		struct queue_limits *lim, unsigned *nsegs, struct bio_set *bs)
 {
 	*nsegs = 0;
-
-	if (!q->limits.max_write_zeroes_sectors)
+	if (!lim->max_write_zeroes_sectors)
 		return NULL;
-
-	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
+	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
 		return NULL;
-
-	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
+	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
 }
 
 /*
@@ -170,16 +165,16 @@ static struct bio *bio_split_write_zeroes(struct bio *bio,
  * aligned to a physical block boundary.
  */
 static inline unsigned get_max_io_size(struct bio *bio,
-		struct request_queue *q)
+		struct queue_limits *lim)
 {
-	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
-	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
-	unsigned max_sectors = queue_max_sectors(q), start, end;
+	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
+	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
+	unsigned max_sectors = lim->max_sectors, start, end;
 
-	if (q->limits.chunk_sectors) {
+	if (lim->chunk_sectors) {
 		max_sectors = min(max_sectors,
			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
-					q->limits.chunk_sectors));
+					lim->chunk_sectors));
 	}
 
 	start = bio->bi_iter.bi_sector & (pbs - 1);
@@ -189,11 +184,10 @@ static inline unsigned get_max_io_size(struct bio *bio,
 	return max_sectors & ~(lbs - 1);
 }
 
-static inline unsigned get_max_segment_size(const struct request_queue *q,
-					    struct page *start_page,
-					    unsigned long offset)
+static inline unsigned get_max_segment_size(struct queue_limits *lim,
+		struct page *start_page, unsigned long offset)
 {
-	unsigned long mask = queue_segment_boundary(q);
+	unsigned long mask = lim->seg_boundary_mask;
 
 	offset = mask & (page_to_phys(start_page) + offset);
 
@@ -202,12 +196,12 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
	 * on 32bit arch, use queue's max segment size when that happens.
	 */
 	return min_not_zero(mask - offset + 1,
-			(unsigned long)queue_max_segment_size(q));
+			(unsigned long)lim->max_segment_size);
 }
 
 /**
  * bvec_split_segs - verify whether or not a bvec should be split in the middle
- * @q:    [in] request queue associated with the bio associated with @bv
+ * @lim:  [in] queue limits to split based on
  * @bv:   [in] bvec to examine
  * @nsegs: [in,out] Number of segments in the bio being built. Incremented
  *	   by the number of segments from @bv that may be appended to that
@@ -225,26 +219,25 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
  * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
  * the block driver.
  */
-static bool bvec_split_segs(const struct request_queue *q,
-			    const struct bio_vec *bv, unsigned *nsegs,
-			    unsigned *bytes, unsigned max_segs,
-			    unsigned max_bytes)
+static bool bvec_split_segs(struct queue_limits *lim, const struct bio_vec *bv,
+		unsigned *nsegs, unsigned *bytes, unsigned max_segs,
+		unsigned max_bytes)
 {
 	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
 	unsigned len = min(bv->bv_len, max_len);
 	unsigned total_len = 0;
 	unsigned seg_size = 0;
 
 	while (len && *nsegs < max_segs) {
-		seg_size = get_max_segment_size(q, bv->bv_page,
+		seg_size = get_max_segment_size(lim, bv->bv_page,
						bv->bv_offset + total_len);
 		seg_size = min(seg_size, len);
 
 		(*nsegs)++;
 		total_len += seg_size;
 		len -= seg_size;
 
-		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
+		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
 			break;
 	}
 
@@ -257,7 +250,7 @@ static bool bvec_split_segs(const struct request_queue *q,
 /**
  * bio_split_rw - split a bio in two bios
  * @bio:  [in] bio to be split
- * @q:    [in] request queue pointer
+ * @lim:  [in] queue limits to split based on
  * @segs: [out] number of segments in the bio with the first half of the sectors
  * @bs:	  [in] bio set to allocate the clone from
  * @max_bytes: [in] maximum number of bytes per bio
@@ -274,30 +267,30 @@ static bool bvec_split_segs(const struct request_queue *q,
  * responsible for ensuring that @bs is only destroyed after processing of the
  * split bio has finished.
  */
-static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
+static struct bio *bio_split_rw(struct bio *bio, struct queue_limits *lim,
		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
 {
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned nsegs = 0, bytes = 0;
-	const unsigned max_segs = queue_max_segments(q);
 
 	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
-		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
+		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
			goto split;
 
-		if (nsegs < max_segs &&
+		if (nsegs < lim->max_segments &&
		    bytes + bv.bv_len <= max_bytes &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			bytes += bv.bv_len;
-		} else if (bvec_split_segs(q, &bv, &nsegs, &bytes, max_segs,
-					   max_bytes)) {
-			goto split;
+		} else {
+			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
+					lim->max_segments, max_bytes))
+				goto split;
		}
 
		bvprv = bv;
@@ -314,7 +307,7 @@ static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
	 * split size so that each bio is properly block size aligned, even if
	 * we do not use the full hardware limits.
	 */
-	bytes = ALIGN_DOWN(bytes, queue_logical_block_size(q));
+	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
 
	/*
	 * Bio splitting may cause subtle trouble such as hang when doing sync
@@ -328,7 +321,7 @@ static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
 /**
  * __bio_split_to_limits - split a bio to fit the queue limits
  * @bio:     bio to be split
- * @q:       request_queue new bio is being queued at
+ * @lim:     queue limits to split based on
  * @nr_segs: returns the number of segments in the returned bio
  *
  * Check if @bio needs splitting based on the queue limits, and if so split off
@@ -338,7 +331,7 @@ static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
  * The split bio is allocated from @q->bio_split, which is provided by the
  * block layer.
  */
-struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
+struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
		unsigned int *nr_segs)
 {
 	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
@@ -347,14 +340,14 @@ struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-		split = bio_split_discard(bio, q, nr_segs, bs);
+		split = bio_split_discard(bio, lim, nr_segs, bs);
		break;
 	case REQ_OP_WRITE_ZEROES:
-		split = bio_split_write_zeroes(bio, q, nr_segs, bs);
+		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
		break;
 	default:
-		split = bio_split_rw(bio, q, nr_segs, bs,
-				get_max_io_size(bio, q) << SECTOR_SHIFT);
+		split = bio_split_rw(bio, lim, nr_segs, bs,
+				get_max_io_size(bio, lim) << SECTOR_SHIFT);
		break;
 	}
 
@@ -384,11 +377,11 @@ struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
  */
 struct bio *bio_split_to_limits(struct bio *bio)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
 	unsigned int nr_segs;
 
-	if (bio_may_exceed_limits(bio, q))
-		return __bio_split_to_limits(bio, q, &nr_segs);
+	if (bio_may_exceed_limits(bio, lim))
+		return __bio_split_to_limits(bio, lim, &nr_segs);
 	return bio;
 }
 EXPORT_SYMBOL(bio_split_to_limits);
@@ -421,7 +414,7 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 	}
 
 	rq_for_each_bvec(bv, rq, iter)
-		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &bytes,
+		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
				UINT_MAX, UINT_MAX);
 	return nr_phys_segs;
 }
@@ -452,8 +445,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 
 	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
-		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
-					offset), nbytes);
+		unsigned len = min(get_max_segment_size(&q->limits,
+					bvec->bv_page, offset), nbytes);
		struct page *page = bvec->bv_page;
 
		/*

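As a side note, the alignment step in bio_split_discard() above is easy to sanity-check with plain numbers. The following stand-alone user-space sketch (not kernel code; the limit values are made-up examples, and sector_div() is modelled with the % operator) mirrors that calculation:

#include <stdio.h>

int main(void)
{
        /* made-up example values, all in 512-byte sectors */
        unsigned long long bi_sector = 1001;    /* bio->bi_iter.bi_sector */
        unsigned int granularity = 8;           /* discard_granularity >> 9 */
        unsigned int alignment = 0;             /* (discard_alignment >> 9) % granularity */
        unsigned int split_sectors = 2048;      /* already capped to max_discard_sectors */

        /*
         * Same idea as bio_split_discard(): if the next starting sector
         * would be misaligned, stop the discard at the previous aligned
         * sector.
         */
        unsigned long long tmp = bi_sector + split_sectors - alignment;
        unsigned int rem = tmp % granularity;   /* what sector_div() returns */

        if (split_sectors > rem)
                split_sectors -= rem;

        /* prints 2047: the next discard then starts at sector 3048, 8-aligned */
        printf("split_sectors = %u\n", split_sectors);
        return 0;
}
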
block/blk-mq.c

Lines changed: 2 additions & 2 deletions

@@ -2816,8 +2816,8 @@ void blk_mq_submit_bio(struct bio *bio)
 	blk_status_t ret;
 
 	bio = blk_queue_bounce(bio, q);
-	if (bio_may_exceed_limits(bio, q))
-		bio = __bio_split_to_limits(bio, q, &nr_segs);
+	if (bio_may_exceed_limits(bio, &q->limits))
+		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
		return;

block/blk.h

Lines changed: 14 additions & 11 deletions

@@ -97,23 +97,23 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
 	return true;
 }
 
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
+static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
 {
-	return (offset & queue_virt_boundary(q)) ||
-		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+	return (offset & lim->virt_boundary_mask) ||
+		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
 }
 
 /*
  * Check if adding a bio_vec after bprv with offset would create a gap in
  * the SG list. Most drivers don't care about this, but some do.
  */
-static inline bool bvec_gap_to_prev(struct request_queue *q,
+static inline bool bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
 {
-	if (!queue_virt_boundary(q))
+	if (!lim->virt_boundary_mask)
		return false;
-	return __bvec_gap_to_prev(q, bprv, offset);
+	return __bvec_gap_to_prev(lim, bprv, offset);
 }
 
 static inline bool rq_mergeable(struct request *rq)
@@ -189,7 +189,8 @@ static inline bool integrity_req_gap_back_merge(struct request *req,
 	struct bio_integrity_payload *bip = bio_integrity(req->bio);
 	struct bio_integrity_payload *bip_next = bio_integrity(next);
 
-	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+	return bvec_gap_to_prev(&req->q->limits,
+				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
 }
 
@@ -199,7 +200,8 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
 
-	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+	return bvec_gap_to_prev(&req->q->limits,
+				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
 }
 
@@ -288,7 +290,8 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
 ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
 
-static inline bool bio_may_exceed_limits(struct bio *bio, struct request_queue *q)
+static inline bool bio_may_exceed_limits(struct bio *bio,
+		struct queue_limits *lim)
 {
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
@@ -307,11 +310,11 @@ static inline bool bio_may_exceed_limits(struct bio *bio, struct request_queue *
	 * to the performance impact of cloned bios themselves the loop below
	 * doesn't matter anyway.
	 */
-	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
+	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
 }
 
-struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
+struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
		unsigned int *nr_segs);
 int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);

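For reference, the virt_boundary rule that __bvec_gap_to_prev() and bvec_gap_to_prev() now read directly from struct queue_limits can be exercised in isolation. This is a stand-alone user-space sketch with mock stand-in types (the 4K boundary mask is just an example), not the kernel definitions themselves:

#include <stdbool.h>
#include <stdio.h>

/* mock stand-ins for struct queue_limits and struct bio_vec */
struct mock_limits { unsigned long virt_boundary_mask; };
struct mock_bvec { unsigned int bv_offset, bv_len; };

/* same rule as __bvec_gap_to_prev()/bvec_gap_to_prev() in blk.h */
static bool gap_to_prev(const struct mock_limits *lim,
                        const struct mock_bvec *bprv, unsigned int offset)
{
        if (!lim->virt_boundary_mask)
                return false;   /* queue has no SG gap restriction */
        return (offset & lim->virt_boundary_mask) ||
               ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

int main(void)
{
        struct mock_limits lim = { .virt_boundary_mask = 4095 };  /* 4K boundary */
        struct mock_bvec prev = { .bv_offset = 0, .bv_len = 4096 };

        /* previous vector ends on the boundary, next starts on one: no gap */
        printf("%d\n", gap_to_prev(&lim, &prev, 0));    /* prints 0 */

        /* next vector starts mid-page: adding it would create an SG gap */
        printf("%d\n", gap_to_prev(&lim, &prev, 512));  /* prints 1 */
        return 0;
}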