@@ -162,8 +162,7 @@ static unsigned get_max_segment_size(struct request_queue *q,
  * variables.
  */
 static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
-		unsigned *nsegs, unsigned *last_seg_size,
-		unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
+		unsigned *nsegs, unsigned *sectors, unsigned max_segs)
 {
 	unsigned len = bv->bv_len;
 	unsigned total_len = 0;
@@ -185,28 +184,12 @@ static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
 			break;
 	}
 
-	if (!new_nsegs)
-		return !!len;
-
-	/* update front segment size */
-	if (!*nsegs) {
-		unsigned first_seg_size;
-
-		if (new_nsegs == 1)
-			first_seg_size = get_max_segment_size(q, bv->bv_offset);
-		else
-			first_seg_size = queue_max_segment_size(q);
-
-		if (*front_seg_size < first_seg_size)
-			*front_seg_size = first_seg_size;
+	if (new_nsegs) {
+		*nsegs += new_nsegs;
+		if (sectors)
+			*sectors += total_len >> 9;
 	}
 
-	/* update other varibles */
-	*last_seg_size = seg_size;
-	*nsegs += new_nsegs;
-	if (sectors)
-		*sectors += total_len >> 9;
-
 	/* split in the middle of the bvec if len != 0 */
 	return !!len;
 }
@@ -218,8 +201,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 {
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
-	unsigned seg_size = 0, nsegs = 0, sectors = 0;
-	unsigned front_seg_size = bio->bi_seg_front_size;
+	unsigned nsegs = 0, sectors = 0;
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
@@ -243,8 +225,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			/* split in the middle of bvec */
 			bv.bv_len = (max_sectors - sectors) << 9;
 			bvec_split_segs(q, &bv, &nsegs,
-					&seg_size,
-					&front_seg_size,
 					&sectors, max_segs);
 		}
 		goto split;
@@ -258,12 +238,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 
 		if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
 			nsegs++;
-			seg_size = bv.bv_len;
 			sectors += bv.bv_len >> 9;
-			if (nsegs == 1 && seg_size > front_seg_size)
-				front_seg_size = seg_size;
-		} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
-				&front_seg_size, &sectors, max_segs)) {
+		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
+				max_segs)) {
 			goto split;
 		}
 	}
@@ -278,10 +255,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		bio = new;
 	}
 
-	bio->bi_seg_front_size = front_seg_size;
-	if (seg_size > bio->bi_seg_back_size)
-		bio->bi_seg_back_size = seg_size;
-
 	return do_split ? new : NULL;
 }
 
@@ -336,17 +309,13 @@ EXPORT_SYMBOL(blk_queue_split);
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
 {
-	struct bio_vec uninitialized_var(bv), bvprv = { NULL };
-	unsigned int seg_size, nr_phys_segs;
-	unsigned front_seg_size;
-	struct bio *fbio, *bbio;
+	unsigned int nr_phys_segs = 0;
 	struct bvec_iter iter;
+	struct bio_vec bv;
 
 	if (!bio)
 		return 0;
 
-	front_seg_size = bio->bi_seg_front_size;
-
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
@@ -356,23 +325,11 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 1;
 	}
 
-	fbio = bio;
-	seg_size = 0;
-	nr_phys_segs = 0;
 	for_each_bio(bio) {
-		bio_for_each_bvec(bv, bio, iter) {
-			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
-					&front_seg_size, NULL, UINT_MAX);
-		}
-		bbio = bio;
-		if (likely(bio->bi_iter.bi_size))
-			bvprv = bv;
+		bio_for_each_bvec(bv, bio, iter)
+			bvec_split_segs(q, &bv, &nr_phys_segs, NULL, UINT_MAX);
 	}
 
-	fbio->bi_seg_front_size = front_seg_size;
-	if (seg_size > bbio->bi_seg_back_size)
-		bbio->bi_seg_back_size = seg_size;
-
 	return nr_phys_segs;
 }
 
@@ -392,24 +349,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 	bio_set_flag(bio, BIO_SEG_VALID);
 }
 
-static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
-				   struct bio *nxt)
-{
-	struct bio_vec end_bv = { NULL }, nxt_bv;
-
-	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-	    queue_max_segment_size(q))
-		return 0;
-
-	if (!bio_has_data(bio))
-		return 1;
-
-	bio_get_last_bvec(bio, &end_bv);
-	bio_get_first_bvec(nxt, &nxt_bv);
-
-	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
-}
-
 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
 {
@@ -669,8 +608,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
 {
 	int total_phys_segments;
-	unsigned int seg_size =
-		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
 
 	if (req_gap_back_merge(req, next->bio))
 		return 0;
@@ -683,13 +620,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
-		if (req->nr_phys_segments == 1)
-			req->bio->bi_seg_front_size = seg_size;
-		if (next->nr_phys_segments == 1)
-			next->biotail->bi_seg_back_size = seg_size;
-	}
-
 	if (total_phys_segments > queue_max_segments(q))
 		return 0;
 
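For reference, here is a minimal userspace sketch of the splitting arithmetic that the simplified bvec_split_segs() is left with: chop one bvec into segments no larger than the queue's segment-size limit and bump the caller's segment and sector counters. The limits and helper names below are made-up stand-ins (not the kernel's queue limits), so treat it as an illustration of the logic rather than kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for queue_max_segment_size()/queue_max_segments(). */
#define MODEL_MAX_SEGMENT_SIZE	65536u
#define MODEL_MAX_SEGS		128u

/*
 * Model of the simplified bvec_split_segs(): split bv_len bytes into
 * segments of at most MODEL_MAX_SEGMENT_SIZE, updating *nsegs and, when
 * requested, *sectors.  Returns true if the bvec could not be consumed
 * completely, i.e. the bio would have to be split at this point.
 */
static bool split_segs_model(unsigned bv_len, unsigned *nsegs, unsigned *sectors)
{
	unsigned len = bv_len, total_len = 0, new_nsegs = 0;

	while (len && new_nsegs + *nsegs < MODEL_MAX_SEGS) {
		unsigned seg_size = MODEL_MAX_SEGMENT_SIZE;

		if (seg_size > len)
			seg_size = len;
		new_nsegs++;
		total_len += seg_size;
		len -= seg_size;
	}

	if (new_nsegs) {
		*nsegs += new_nsegs;
		if (sectors)
			*sectors += total_len >> 9;	/* bytes -> 512B sectors */
	}
	return !!len;
}

int main(void)
{
	unsigned nsegs = 0, sectors = 0;

	/* A 1 MiB bvec becomes 16 segments of 64 KiB, i.e. 2048 sectors. */
	bool must_split = split_segs_model(1u << 20, &nsegs, &sectors);

	printf("nsegs=%u sectors=%u split=%d\n", nsegs, sectors, must_split);
	return 0;
}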