@@ -2858,11 +2858,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (unlikely(bio_queue_enter(bio)))
-		return NULL;
-
 	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-		goto queue_exit;
+		return NULL;
 
 	rq_qos_throttle(q, bio);
 
@@ -2878,48 +2875,36 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-queue_exit:
-	blk_queue_exit(q);
 	return NULL;
 }
 
-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+/* return true if this @rq can be used for @bio */
+static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
+		struct bio *bio)
 {
-	struct request *rq;
-	enum hctx_type type, hctx_type;
+	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
+	enum hctx_type hctx_type = rq->mq_hctx->type;
 
-	if (!plug)
-		return NULL;
-	rq = rq_list_peek(&plug->cached_rq);
-	if (!rq || rq->q != q)
-		return NULL;
+	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
 
-	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-		*bio = NULL;
-		return NULL;
-	}
-
-	type = blk_mq_get_hctx_type((*bio)->bi_opf);
-	hctx_type = rq->mq_hctx->type;
 	if (type != hctx_type &&
 	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
-		return NULL;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
-		return NULL;
+		return false;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return false;
 
 	/*
 	 * If any qos ->throttle() end up blocking, we will have flushed the
 	 * plug and hence killed the cached_rq list as well. Pop this entry
 	 * before we throttle.
 	 */
 	plug->cached_rq = rq_list_next(rq);
-	rq_qos_throttle(q, *bio);
+	rq_qos_throttle(rq->q, bio);
 
 	blk_mq_rq_time_init(rq, 0);
-	rq->cmd_flags = (*bio)->bi_opf;
+	rq->cmd_flags = bio->bi_opf;
 	INIT_LIST_HEAD(&rq->queuelist);
-	return rq;
+	return true;
 }
 
 static void bio_set_ioprio(struct bio *bio)
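For orientation, this is how the renamed helper should read once the hunk above is applied, assembled from the plus and context lines only (indentation approximated; the short comment before the type check is added here and is not part of the patch). It refuses the cached request when the bio's hctx type does not match the request's hctx, except that a READ bio may reuse a DEFAULT request, or when the flush status differs, and otherwise pops the request off the plug's cache before throttling.

/* return true if this @rq can be used for @bio */
static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
		struct bio *bio)
{
	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
	enum hctx_type hctx_type = rq->mq_hctx->type;

	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);

	/* (added) hctx types must match, but READ may reuse a DEFAULT request */
	if (type != hctx_type &&
	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
		return false;
	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
		return false;

	/*
	 * If any qos ->throttle() end up blocking, we will have flushed the
	 * plug and hence killed the cached_rq list as well. Pop this entry
	 * before we throttle.
	 */
	plug->cached_rq = rq_list_next(rq);
	rq_qos_throttle(rq->q, bio);

	blk_mq_rq_time_init(rq, 0);
	rq->cmd_flags = bio->bi_opf;
	INIT_LIST_HEAD(&rq->queuelist);
	return true;
}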
@@ -2949,7 +2934,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct blk_plug *plug = blk_mq_plug(bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct blk_mq_hw_ctx *hctx;
-	struct request *rq;
+	struct request *rq = NULL;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
@@ -2960,20 +2945,36 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (!bio_integrity_prep(bio))
-		return;
-
 	bio_set_ioprio(bio);
 
-	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
-	if (!rq) {
-		if (!bio)
+	if (plug) {
+		rq = rq_list_peek(&plug->cached_rq);
+		if (rq && rq->q != q)
+			rq = NULL;
+	}
+	if (rq) {
+		if (!bio_integrity_prep(bio))
 			return;
-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-		if (unlikely(!rq))
+		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
 			return;
+		if (blk_mq_can_use_cached_rq(rq, plug, bio))
+			goto done;
+		percpu_ref_get(&q->q_usage_counter);
+	} else {
+		if (unlikely(bio_queue_enter(bio)))
+			return;
+		if (!bio_integrity_prep(bio))
+			goto fail;
+	}
+
+	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+	if (unlikely(!rq)) {
+fail:
+		blk_queue_exit(q);
+		return;
 	}
 
+done:
 	trace_block_getrq(bio);
 
 	rq_qos_track(q, rq, bio);
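Stitching the last two hunks together, the request-allocation portion of blk_mq_submit_bio should end up looking roughly like the sketch below, reconstructed from the plus and context lines (the orientation comments are added here and are not part of the patch). The visible effect of the restructuring is that the non-cached path now enters the queue with bio_queue_enter() before bio_integrity_prep() runs, the cached path only calls percpu_ref_get() when it has to fall back to a fresh allocation, and blk_mq_get_new_requests() itself no longer enters the queue.

	bio_set_ioprio(bio);

	/* try to reuse a request cached in the current plug for this queue */
	if (plug) {
		rq = rq_list_peek(&plug->cached_rq);
		if (rq && rq->q != q)
			rq = NULL;
	}
	if (rq) {
		if (!bio_integrity_prep(bio))
			return;
		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
			return;
		if (blk_mq_can_use_cached_rq(rq, plug, bio))
			goto done;
		/* cached request not usable: take a reference for the allocation below */
		percpu_ref_get(&q->q_usage_counter);
	} else {
		if (unlikely(bio_queue_enter(bio)))
			return;
		if (!bio_integrity_prep(bio))
			goto fail;
	}

	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
	if (unlikely(!rq)) {
fail:
		blk_queue_exit(q);
		return;
	}

done:
	trace_block_getrq(bio);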