@@ -477,9 +477,9 @@ static void __blk_mq_free_request(struct request *rq)
 	blk_pm_mark_last_busy(rq);
 	rq->mq_hctx = NULL;
 	if (rq->tag != -1)
-		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
+		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
-		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
+		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
 	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
@@ -735,7 +735,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		 * merge.
 		 */
 		if (rq->rq_flags & RQF_DONTPREP)
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, false, false);
 		else
 			blk_mq_sched_insert_request(rq, true, false, false);
 	}
@@ -1286,7 +1286,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			q->mq_ops->commit_rqs(hctx);
 
 		spin_lock(&hctx->lock);
-		list_splice_init(list, &hctx->dispatch);
+		list_splice_tail_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
 
 		/*
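
The switch from list_splice_init() to list_splice_tail_init() matters because hctx->dispatch may already hold requests (for example, requests inserted directly by the hunk further below): splicing at the head would let requests that just failed to dispatch jump ahead of them. Below is a minimal userspace sketch, not kernel code, that re-implements just enough of include/linux/list.h to show the difference in splice position; the struct and helper names mirror the kernel's for readability.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct req { struct list_head link; int id; };	/* link is first, so the cast in dump() is safe */

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* Link the nodes of @list between @prev and @next. */
static void __list_splice(struct list_head *list,
			  struct list_head *prev, struct list_head *next)
{
	struct list_head *first = list->next, *last = list->prev;

	first->prev = prev; prev->next = first;
	last->next = next;  next->prev = last;
}

/* Old behavior: splice @list in front of @head's first entry. */
static void list_splice_init(struct list_head *list, struct list_head *head)
{
	if (list->next != list) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);
	}
}

/* New behavior: splice @list behind @head's last entry. */
static void list_splice_tail_init(struct list_head *list, struct list_head *head)
{
	if (list->next != list) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);
	}
}

static void dump(const char *tag, struct list_head *h)
{
	printf("%s:", tag);
	for (struct list_head *p = h->next; p != h; p = p->next)
		printf(" %d", ((struct req *)p)->id);
	printf("\n");
}

int main(void)
{
	struct req r[4] = { {.id = 1}, {.id = 2}, {.id = 3}, {.id = 4} };
	struct list_head dispatch, leftover;

	INIT_LIST_HEAD(&dispatch);
	INIT_LIST_HEAD(&leftover);
	list_add_tail(&r[0].link, &dispatch);	/* dispatch already holds 1 2 */
	list_add_tail(&r[1].link, &dispatch);
	list_add_tail(&r[2].link, &leftover);	/* undispatched leftovers: 3 4 */
	list_add_tail(&r[3].link, &leftover);

	list_splice_tail_init(&leftover, &dispatch);
	dump("tail splice", &dispatch);	/* 1 2 3 4; list_splice_init() would give 3 4 1 2 */
	return 0;
}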
@@ -1677,12 +1677,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+				  bool run_queue)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
-	list_add_tail(&rq->queuelist, &hctx->dispatch);
+	if (at_head)
+		list_add(&rq->queuelist, &hctx->dispatch);
+	else
+		list_add_tail(&rq->queuelist, &hctx->dispatch);
 	spin_unlock(&hctx->lock);
 
 	if (run_queue)
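
A sketch of what the new at_head flag changes: the same dispatch list, with insertion at either end. This is plain userspace C, not kernel code; the kernel-style list helpers are re-implemented locally, and bypass_insert() is a hypothetical stand-in that mirrors the rewritten blk_mq_request_bypass_insert() minus the locking and the run_queue kick.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct req { struct list_head queuelist; int id; };	/* queuelist first, so the cast in main() is safe */

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void __list_add(struct list_head *n,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = n; n->next = next;
	n->prev = prev; prev->next = n;
}

/* Insert right after the list head, i.e. at the front. */
static void list_add(struct list_head *n, struct list_head *h)
{
	__list_add(n, h, h->next);
}

/* Insert right before the list head, i.e. at the back. */
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	__list_add(n, h->prev, h);
}

/* Hypothetical stand-in for the reworked blk_mq_request_bypass_insert(). */
static void bypass_insert(struct req *rq, int at_head, struct list_head *dispatch)
{
	if (at_head)
		list_add(&rq->queuelist, dispatch);
	else
		list_add_tail(&rq->queuelist, dispatch);
}

int main(void)
{
	struct list_head dispatch;
	struct req a = {.id = 1}, b = {.id = 2}, c = {.id = 3};

	INIT_LIST_HEAD(&dispatch);
	bypass_insert(&a, 0, &dispatch);	/* tail: 1 */
	bypass_insert(&b, 0, &dispatch);	/* tail: 1 2 */
	bypass_insert(&c, 1, &dispatch);	/* head: 3 1 2 */

	for (struct list_head *p = dispatch.next; p != &dispatch; p = p->next)
		printf("%d ", ((struct req *)p)->id);
	printf("\n");	/* prints: 3 1 2 */
	return 0;
}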
@@ -1849,7 +1853,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	if (bypass_insert)
 		return BLK_STS_RESOURCE;
 
-	blk_mq_request_bypass_insert(rq, run_queue);
+	blk_mq_request_bypass_insert(rq, false, run_queue);
 	return BLK_STS_OK;
 }
 
@@ -1876,7 +1880,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
 	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-		blk_mq_request_bypass_insert(rq, true);
+		blk_mq_request_bypass_insert(rq, false, true);
 	else if (ret != BLK_STS_OK)
 		blk_mq_end_request(rq, ret);
 
@@ -1910,7 +1914,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		if (ret != BLK_STS_OK) {
 			if (ret == BLK_STS_RESOURCE ||
 					ret == BLK_STS_DEV_RESOURCE) {
-				blk_mq_request_bypass_insert(rq,
+				blk_mq_request_bypass_insert(rq, false,
 							list_empty(list));
 				break;
 			}
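
All of the call sites above pass at_head == false, preserving the old tail-insert behavior. This list variant also shows the fallback pattern: issue requests directly until the driver reports it is out of resources, then park the failing request on the dispatch list and stop, running the queue only when the input list is exhausted (otherwise a later insert will rerun it anyway). Below is a minimal userspace simulation of that control flow with a hypothetical budget-limited driver; it is an illustration, not the kernel API.

#include <stdio.h>
#include <stdbool.h>

enum blk_status { STS_OK, STS_RESOURCE };

/* Hypothetical driver that accepts two requests, then reports it is full. */
static enum blk_status try_issue_directly(int id)
{
	static int budget = 2;
	(void)id;
	return budget-- > 0 ? STS_OK : STS_RESOURCE;
}

int main(void)
{
	int list[] = { 1, 2, 3, 4 };
	int count = sizeof(list) / sizeof(list[0]);

	for (int i = 0; i < count; i++) {
		if (try_issue_directly(list[i]) == STS_RESOURCE) {
			/* Park on the dispatch list; kick the queue only if
			 * this was the last request, mirroring the
			 * list_empty(list) argument in the hunk above. */
			bool run_queue = (i == count - 1);

			printf("park %d (run_queue=%d)\n", list[i], run_queue);
			break;
		}
		printf("issued %d directly\n", list[i]);
	}
	return 0;
}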
@@ -3398,7 +3402,6 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
 }
 
 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
-				       struct blk_mq_hw_ctx *hctx,
 				       struct request *rq)
 {
 	unsigned long ret = 0;
@@ -3431,7 +3434,6 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 }
 
 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
-				     struct blk_mq_hw_ctx *hctx,
 				     struct request *rq)
 {
 	struct hrtimer_sleeper hs;
@@ -3451,7 +3453,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	if (q->poll_nsec > 0)
 		nsecs = q->poll_nsec;
 	else
-		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
+		nsecs = blk_mq_poll_nsecs(q, rq);
 
 	if (!nsecs)
 		return false;
@@ -3506,7 +3508,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
 			return false;
 	}
 
-	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
+	return blk_mq_poll_hybrid_sleep(q, rq);
 }
 
 /**