@@ -82,7 +82,7 @@ static inline bool bio_will_gap(struct request_queue *q,
 	bio_get_first_bvec(next, &nb);
 	if (biovec_phys_mergeable(q, &pb, &nb))
 		return false;
-	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
 }
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
@@ -100,26 +100,25 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
  * is defined as 'unsigned int', meantime it has to be aligned to with the
  * logical block size, which is the minimum accepted unit by hardware.
  */
-static unsigned int bio_allowed_max_sectors(struct request_queue *q)
+static unsigned int bio_allowed_max_sectors(struct queue_limits *lim)
 {
-	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
 }
 
-static struct bio *bio_split_discard(struct bio *bio, struct request_queue *q,
+static struct bio *bio_split_discard(struct bio *bio, struct queue_limits *lim,
 		unsigned *nsegs, struct bio_set *bs)
 {
 	unsigned int max_discard_sectors, granularity;
-	int alignment;
 	sector_t tmp;
 	unsigned split_sectors;
 
 	*nsegs = 1;
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
+	granularity = max(lim->discard_granularity >> 9, 1U);
 
-	max_discard_sectors = min(q->limits.max_discard_sectors,
-			bio_allowed_max_sectors(q));
+	max_discard_sectors =
+		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
 	max_discard_sectors -= max_discard_sectors % granularity;
 
 	if (unlikely(!max_discard_sectors)) {
@@ -136,9 +135,8 @@ static struct bio *bio_split_discard(struct bio *bio, struct request_queue *q,
 	 * If the next starting sector would be misaligned, stop the discard at
 	 * the previous aligned sector.
 	 */
-	alignment = (q->limits.discard_alignment >> 9) % granularity;
-
-	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+	tmp = bio->bi_iter.bi_sector + split_sectors -
+		((lim->discard_alignment >> 9) % granularity);
 	tmp = sector_div(tmp, granularity);
 
 	if (split_sectors > tmp)
@@ -148,17 +146,14 @@ static struct bio *bio_split_discard(struct bio *bio, struct request_queue *q,
 }
 
 static struct bio *bio_split_write_zeroes(struct bio *bio,
-		struct request_queue *q, unsigned *nsegs, struct bio_set *bs)
+		struct queue_limits *lim, unsigned *nsegs, struct bio_set *bs)
 {
 	*nsegs = 0;
-
-	if (!q->limits.max_write_zeroes_sectors)
+	if (!lim->max_write_zeroes_sectors)
 		return NULL;
-
-	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
+	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
 		return NULL;
-
-	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
+	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
 }
 
 /*
@@ -170,16 +165,16 @@ static struct bio *bio_split_write_zeroes(struct bio *bio,
  * aligned to a physical block boundary.
  */
 static inline unsigned get_max_io_size(struct bio *bio,
-		struct request_queue *q)
+		struct queue_limits *lim)
 {
-	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
-	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
-	unsigned max_sectors = queue_max_sectors(q), start, end;
+	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
+	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
+	unsigned max_sectors = lim->max_sectors, start, end;
 
-	if (q->limits.chunk_sectors) {
+	if (lim->chunk_sectors) {
 		max_sectors = min(max_sectors,
 			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
-				q->limits.chunk_sectors));
+				lim->chunk_sectors));
 	}
 
 	start = bio->bi_iter.bi_sector & (pbs - 1);
@@ -189,11 +184,10 @@ static inline unsigned get_max_io_size(struct bio *bio,
 	return max_sectors & ~(lbs - 1);
 }
 
-static inline unsigned get_max_segment_size(const struct request_queue *q,
-		struct page *start_page,
-		unsigned long offset)
+static inline unsigned get_max_segment_size(struct queue_limits *lim,
+		struct page *start_page, unsigned long offset)
 {
-	unsigned long mask = queue_segment_boundary(q);
+	unsigned long mask = lim->seg_boundary_mask;
 
 	offset = mask & (page_to_phys(start_page) + offset);
 
@@ -202,12 +196,12 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
 	 * on 32bit arch, use queue's max segment size when that happens.
 	 */
 	return min_not_zero(mask - offset + 1,
-			(unsigned long)queue_max_segment_size(q));
+			(unsigned long)lim->max_segment_size);
 }
 
 /**
  * bvec_split_segs - verify whether or not a bvec should be split in the middle
- * @q:        [in] request queue associated with the bio associated with @bv
+ * @lim:      [in] queue limits to split based on
  * @bv:       [in] bvec to examine
  * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
  *            by the number of segments from @bv that may be appended to that
@@ -225,26 +219,25 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
  * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
  * the block driver.
  */
-static bool bvec_split_segs(const struct request_queue *q,
-		const struct bio_vec *bv, unsigned *nsegs,
-		unsigned *bytes, unsigned max_segs,
-		unsigned max_bytes)
+static bool bvec_split_segs(struct queue_limits *lim, const struct bio_vec *bv,
+		unsigned *nsegs, unsigned *bytes, unsigned max_segs,
+		unsigned max_bytes)
 {
 	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
 	unsigned len = min(bv->bv_len, max_len);
 	unsigned total_len = 0;
 	unsigned seg_size = 0;
 
 	while (len && *nsegs < max_segs) {
-		seg_size = get_max_segment_size(q, bv->bv_page,
+		seg_size = get_max_segment_size(lim, bv->bv_page,
 						bv->bv_offset + total_len);
 		seg_size = min(seg_size, len);
 
 		(*nsegs)++;
 		total_len += seg_size;
 		len -= seg_size;
 
-		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
+		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
 			break;
 	}
 
@@ -257,7 +250,7 @@ static bool bvec_split_segs(const struct request_queue *q,
 /**
  * bio_split_rw - split a bio in two bios
  * @bio:  [in] bio to be split
- * @q:    [in] request queue pointer
+ * @lim:  [in] queue limits to split based on
  * @segs: [out] number of segments in the bio with the first half of the sectors
  * @bs:	  [in] bio set to allocate the clone from
 * @max_bytes: [in] maximum number of bytes per bio
@@ -274,30 +267,30 @@ static bool bvec_split_segs(const struct request_queue *q,
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
-static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
+static struct bio *bio_split_rw(struct bio *bio, struct queue_limits *lim,
 		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
 {
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned nsegs = 0, bytes = 0;
-	const unsigned max_segs = queue_max_segments(q);
 
 	bio_for_each_bvec(bv, bio, iter) {
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
+		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
 			goto split;
 
-		if (nsegs < max_segs &&
+		if (nsegs < lim->max_segments &&
 		    bytes + bv.bv_len <= max_bytes &&
 		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
 			nsegs++;
 			bytes += bv.bv_len;
-		} else if (bvec_split_segs(q, &bv, &nsegs, &bytes, max_segs,
-				max_bytes)) {
-			goto split;
+		} else {
+			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
+					lim->max_segments, max_bytes))
+				goto split;
 		}
 
 		bvprv = bv;
@@ -314,7 +307,7 @@ static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
 	 * split size so that each bio is properly block size aligned, even if
 	 * we do not use the full hardware limits.
 	 */
-	bytes = ALIGN_DOWN(bytes, queue_logical_block_size(q));
+	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
 
 	/*
 	 * Bio splitting may cause subtle trouble such as hang when doing sync
@@ -328,7 +321,7 @@ static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
 /**
  * __bio_split_to_limits - split a bio to fit the queue limits
  * @bio:     bio to be split
- * @q:       request_queue new bio is being queued at
+ * @lim:     queue limits to split based on
  * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
@@ -338,7 +331,7 @@ static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
-struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
+struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
 		unsigned int *nr_segs)
 {
 	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
@@ -347,14 +340,14 @@ struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-		split = bio_split_discard(bio, q, nr_segs, bs);
+		split = bio_split_discard(bio, lim, nr_segs, bs);
 		break;
 	case REQ_OP_WRITE_ZEROES:
-		split = bio_split_write_zeroes(bio, q, nr_segs, bs);
+		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
 		break;
 	default:
-		split = bio_split_rw(bio, q, nr_segs, bs,
-				get_max_io_size(bio, q) << SECTOR_SHIFT);
+		split = bio_split_rw(bio, lim, nr_segs, bs,
+				get_max_io_size(bio, lim) << SECTOR_SHIFT);
 		break;
 	}
 
@@ -384,11 +377,11 @@ struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
 */
 struct bio *bio_split_to_limits(struct bio *bio)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
 	unsigned int nr_segs;
 
-	if (bio_may_exceed_limits(bio, q))
-		return __bio_split_to_limits(bio, q, &nr_segs);
+	if (bio_may_exceed_limits(bio, lim))
+		return __bio_split_to_limits(bio, lim, &nr_segs);
 	return bio;
 }
 EXPORT_SYMBOL(bio_split_to_limits);
@@ -421,7 +414,7 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 	}
 
 	rq_for_each_bvec(bv, rq, iter)
-		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &bytes,
+		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
 				UINT_MAX, UINT_MAX);
 	return nr_phys_segs;
 }
@@ -452,8 +445,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 
 	while (nbytes > 0) {
 		unsigned offset = bvec->bv_offset + total;
-		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
-					offset), nbytes);
+		unsigned len = min(get_max_segment_size(&q->limits,
+				bvec->bv_page, offset), nbytes);
 		struct page *page = bvec->bv_page;
 
 		/*
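The pattern in this diff is mechanical: helpers that only read device limits now take a struct queue_limits * directly instead of the containing struct request_queue *, and callers that still hold a queue pass &q->limits (or &rq->q->limits). The following is a minimal userspace sketch of that calling-convention change, using simplified stand-in types rather than the kernel's actual structures:

#include <stdio.h>

/* Illustrative stand-ins for the kernel structures; the fields are
 * simplified and not meant to match block/blk-merge.c exactly. */
struct queue_limits {
	unsigned int logical_block_size;
	unsigned int max_sectors;
};

struct request_queue {
	struct queue_limits limits;
	/* ... other queue state ... */
};

/* After the refactor the helper needs only the limits, so callers that
 * have no request_queue (for example, code that builds a queue_limits by
 * hand) can use it too. */
static unsigned int max_io_sectors(const struct queue_limits *lim)
{
	return lim->max_sectors;
}

int main(void)
{
	struct request_queue q = {
		.limits = { .logical_block_size = 512, .max_sectors = 255 },
	};

	/* Callers that still hold a queue simply pass &q.limits. */
	printf("max I/O: %u sectors\n", max_io_sectors(&q.limits));
	return 0;
}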