@@ -2495,8 +2495,9 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
 	return BLK_MAX_REQUEST_COUNT;
 }
 
-static bool blk_attempt_bio_merge(struct request_queue *q, struct bio *bio,
-		unsigned int nr_segs, bool *same_queue_rq)
+static bool blk_mq_attempt_bio_merge(struct request_queue *q,
+				     struct bio *bio, unsigned int nr_segs,
+				     bool *same_queue_rq)
 {
 	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
 		if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
@@ -2524,7 +2525,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 		return NULL;
 	if (unlikely(!submit_bio_checks(bio)))
 		goto put_exit;
-	if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
 		goto put_exit;
 
 	rq_qos_throttle(q, bio);
@@ -2560,7 +2561,8 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 	if (rq && rq->q == q) {
 		if (unlikely(!submit_bio_checks(bio)))
 			return NULL;
-		if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+		if (blk_mq_attempt_bio_merge(q, bio, nsegs,
+					     same_queue_rq))
 			return NULL;
 		plug->cached_rq = rq_list_next(rq);
 		INIT_LIST_HEAD(&rq->queuelist);
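
For context on what the renamed helper does, below is a minimal, self-contained C sketch of the control flow visible in the first hunk: skip merging entirely when the queue forbids merges or the bio is not mergeable, otherwise try the per-task plug list first. The mock types and helpers (mock_queue, mock_bio, try_plug_merge, try_sched_merge) are illustrative stand-ins, not kernel APIs, and the fall-back to a scheduler-level merge is an assumption based on how blk-mq merging is typically structured; the hunk above is truncated before that point.

/*
 * Illustrative sketch only -- mock types stand in for
 * struct request_queue and struct bio; this is not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_queue { bool nomerges; };   /* stand-in for request_queue */
struct mock_bio   { bool mergeable; };  /* stand-in for bio */

/* Stand-in for blk_attempt_plug_merge(): pretend nothing is plugged. */
static bool try_plug_merge(struct mock_queue *q, struct mock_bio *bio)
{
	(void)q; (void)bio;
	return false;
}

/* Assumed fall-back, modeling a scheduler-level merge attempt. */
static bool try_sched_merge(struct mock_queue *q, struct mock_bio *bio)
{
	(void)q; (void)bio;
	return false;
}

/*
 * Mirrors the visible shape of blk_mq_attempt_bio_merge(): only
 * attempt a merge when the queue allows merges and the bio itself
 * is mergeable, trying the plug list before anything else.
 */
static bool attempt_bio_merge(struct mock_queue *q, struct mock_bio *bio)
{
	if (!q->nomerges && bio->mergeable) {
		if (try_plug_merge(q, bio))
			return true;
		if (try_sched_merge(q, bio))
			return true;
	}
	return false;
}

int main(void)
{
	struct mock_queue q = { .nomerges = false };
	struct mock_bio bio = { .mergeable = true };

	printf("merged: %s\n", attempt_bio_merge(&q, &bio) ? "yes" : "no");
	return 0;
}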