@@ -478,7 +478,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 		prefetch(tags->static_rqs[tag]);
 		tag_mask &= ~(1UL << i);
 		rq = blk_mq_rq_ctx_init(data, tags, tag);
-		rq_list_add(data->cached_rq, rq);
+		rq_list_add_head(data->cached_rqs, rq);
 		nr++;
 	}
 	if (!(data->rq_flags & RQF_SCHED_TAGS))
@@ -487,7 +487,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
 	data->nr_tags -= nr;
 
-	return rq_list_pop(data->cached_rq);
+	return rq_list_pop(data->cached_rqs);
 }
 
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
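A note on the type these hunks convert to: the plug lists used to be bare `struct request *` heads chained through `rq->rq_next`, with helpers taking a `struct request **`. The new helpers take a `struct rq_list *` instead. The sketch below is a minimal reconstruction of what the diff implies; the authoritative definition lives in include/linux/blk-mq.h, and the head/tail member names plus the trimmed stand-in `struct request` are assumptions for illustration.

/*
 * Minimal reconstruction of the rq_list type the converted code uses.
 * The head/tail member names are assumed; the real definition is in
 * include/linux/blk-mq.h.
 */
struct request {
	struct request *rq_next;	/* same intrusive link as before */
	/* ... remainder of struct request elided ... */
};

struct rq_list {
	struct request *head;		/* first request, NULL when empty */
	struct request *tail;		/* last request, for O(1) appends */
};

/* An all-zero struct is the empty list, hence "struct rq_list l = {};". */
static inline bool rq_list_empty(const struct rq_list *rl)
{
	return rl->head == NULL;
}

/* Look at the first request without detaching it. */
static inline struct request *rq_list_peek(struct rq_list *rl)
{
	return rl->head;
}

This is also why nearly every call site in the hunks below gains an `&`: `rq_list_empty()` and friends now want a pointer to the list struct rather than the value of a head pointer.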
@@ -584,7 +584,7 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
 		.flags		= flags,
 		.cmd_flags	= opf,
 		.nr_tags	= plug->nr_ios,
-		.cached_rq	= &plug->cached_rq,
+		.cached_rqs	= &plug->cached_rqs,
 	};
 	struct request *rq;
 
@@ -609,14 +609,14 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
 	if (!plug)
 		return NULL;
 
-	if (rq_list_empty(plug->cached_rq)) {
+	if (rq_list_empty(&plug->cached_rqs)) {
 		if (plug->nr_ios == 1)
 			return NULL;
 		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
 		if (!rq)
 			return NULL;
 	} else {
-		rq = rq_list_peek(&plug->cached_rq);
+		rq = rq_list_peek(&plug->cached_rqs);
 		if (!rq || rq->q != q)
 			return NULL;
 
@@ -625,7 +625,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
 		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
 			return NULL;
 
-		plug->cached_rq = rq_list_next(rq);
+		rq_list_pop(&plug->cached_rqs);
 		blk_mq_rq_time_init(rq, blk_time_get_ns());
 	}
 
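The hunk above is more than a rename: the old code dequeued the cached request by advancing the head by hand (`plug->cached_rq = rq_list_next(rq);`). With a tail pointer embedded in the list, an open-coded head update would leave `tail` dangling once the last entry is taken, so dequeueing has to go through `rq_list_pop()`. A sketch of its implied behavior, under the same assumed layout as the earlier snippet:

/*
 * Sketch of rq_list_pop() under the assumed head/tail layout: detach
 * and return the first request, clearing tail when the list drains.
 * This is what makes open-coded head advances unsafe after the switch.
 */
static inline struct request *rq_list_pop(struct rq_list *rl)
{
	struct request *rq = rl->head;

	if (rq) {
		rl->head = rq->rq_next;
		if (!rl->head)
			rl->tail = NULL;	/* popped the last entry */
		rq->rq_next = NULL;		/* fully detach */
	}
	return rq;
}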
@@ -802,7 +802,7 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
 {
 	struct request *rq;
 
-	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
+	while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL)
 		blk_mq_free_request(rq);
 }
 
@@ -1392,8 +1392,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	 */
 	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
 		plug->has_elevator = true;
-	rq->rq_next = NULL;
-	rq_list_add(&plug->mq_list, rq);
+	rq_list_add_head(&plug->mq_list, rq);
 	plug->rq_count++;
 }
 
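Note the two-for-one in this hunk: the caller no longer clears `rq->rq_next` before linking. That only works if the add helper owns the link initialization, along the lines of:

/*
 * Sketch of rq_list_add_head() with the assumed layout. It sets up
 * rq->rq_next itself, which is why blk_add_rq_to_plug() could drop
 * its explicit "rq->rq_next = NULL;".
 */
static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
{
	rq->rq_next = rl->head;
	rl->head = rq;
	if (!rl->tail)
		rl->tail = rq;	/* first entry is also the last */
}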
@@ -2785,7 +2784,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 	blk_status_t ret = BLK_STS_OK;
 
 	while ((rq = rq_list_pop(&plug->mq_list))) {
-		bool last = rq_list_empty(plug->mq_list);
+		bool last = rq_list_empty(&plug->mq_list);
 
 		if (hctx != rq->mq_hctx) {
 			if (hctx) {
@@ -2828,8 +2827,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 {
 	struct blk_mq_hw_ctx *this_hctx = NULL;
 	struct blk_mq_ctx *this_ctx = NULL;
-	struct request *requeue_list = NULL;
-	struct request **requeue_lastp = &requeue_list;
+	struct rq_list requeue_list = {};
 	unsigned int depth = 0;
 	bool is_passthrough = false;
 	LIST_HEAD(list);
@@ -2843,12 +2841,12 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 			is_passthrough = blk_rq_is_passthrough(rq);
 		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
 			   is_passthrough != blk_rq_is_passthrough(rq)) {
-			rq_list_add_tail(&requeue_lastp, rq);
+			rq_list_add_tail(&requeue_list, rq);
 			continue;
 		}
 		list_add(&rq->queuelist, &list);
 		depth++;
-	} while (!rq_list_empty(plug->mq_list));
+	} while (!rq_list_empty(&plug->mq_list));
 
 	plug->mq_list = requeue_list;
 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
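These two hunks retire the last-pointer idiom: previously, O(1) appends required threading a `struct request **requeue_lastp` alongside the list head. The embedded tail pointer gives `rq_list_add_tail()` the same complexity from the list struct alone, roughly:

/*
 * Sketch of rq_list_add_tail() with the assumed layout, replacing the
 * old "struct request **requeue_lastp" last-pointer idiom while
 * keeping appends O(1).
 */
static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
{
	rq->rq_next = NULL;
	if (rl->tail)
		rl->tail->rq_next = rq;
	else
		rl->head = rq;		/* list was empty */
	rl->tail = rq;
}

It also makes `plug->mq_list = requeue_list;` a plain two-pointer struct copy: the requeued requests are handed back to the plug in one assignment, with no splice helper needed.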
@@ -2903,19 +2901,19 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		if (q->mq_ops->queue_rqs) {
 			blk_mq_run_dispatch_ops(q,
 				__blk_mq_flush_plug_list(q, plug));
-			if (rq_list_empty(plug->mq_list))
+			if (rq_list_empty(&plug->mq_list))
 				return;
 		}
 
 		blk_mq_run_dispatch_ops(q,
 			blk_mq_plug_issue_direct(plug));
-		if (rq_list_empty(plug->mq_list))
+		if (rq_list_empty(&plug->mq_list))
 			return;
 	}
 
 	do {
 		blk_mq_dispatch_plug_list(plug, from_schedule);
-	} while (!rq_list_empty(plug->mq_list));
+	} while (!rq_list_empty(&plug->mq_list));
 }
 
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -2980,7 +2978,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	if (plug) {
 		data.nr_tags = plug->nr_ios;
 		plug->nr_ios = 1;
-		data.cached_rq = &plug->cached_rq;
+		data.cached_rqs = &plug->cached_rqs;
 	}
 
 	rq = __blk_mq_alloc_requests(&data);
@@ -3003,7 +3001,7 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
 
 	if (!plug)
 		return NULL;
-	rq = rq_list_peek(&plug->cached_rq);
+	rq = rq_list_peek(&plug->cached_rqs);
 	if (!rq || rq->q != q)
 		return NULL;
 	if (type != rq->mq_hctx->type &&
@@ -3017,14 +3015,14 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
 static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
 		struct bio *bio)
 {
-	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+	if (rq_list_pop(&plug->cached_rqs) != rq)
+		WARN_ON_ONCE(1);
 
 	/*
 	 * If any qos ->throttle() end up blocking, we will have flushed the
 	 * plug and hence killed the cached_rq list as well. Pop this entry
 	 * before we throttle.
 	 */
-	plug->cached_rq = rq_list_next(rq);
 	rq_qos_throttle(rq->q, bio);
 
 	blk_mq_rq_time_init(rq, blk_time_get_ns());
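Taken together, the last two hunks keep the cached-request fast path as a peek-then-pop pair: `blk_mq_peek_cached_request()` inspects the head without detaching it, and `blk_mq_use_cached_rq()` commits by popping, warning if the head changed in between. A condensed, hypothetical caller illustrating the pattern (suitability checks abridged):

/* Hypothetical condensed caller showing the peek-then-pop pattern. */
struct request *rq = rq_list_peek(&plug->cached_rqs);

if (rq && rq->q == q) {			/* suitability checks, abridged */
	if (rq_list_pop(&plug->cached_rqs) != rq)
		WARN_ON_ONCE(1);	/* list changed between peek and pop */
	/* ... reinitialize rq and use it for the bio ... */
}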