@@ -487,6 +487,20 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
+/*
+ * 'depth' is a number in the range 1..INT_MAX representing a number of
+ * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
+ * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
+ * Values larger than q->nr_requests have the same effect as q->nr_requests.
+ */
+static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
+{
+	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
+	const unsigned int nrr = hctx->queue->nr_requests;
+
+	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
+}
+
 /*
  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
  * function is used by __blk_mq_get_tag().
@@ -503,7 +517,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	 * Throttle asynchronous requests and writes such that these requests
 	 * do not block the allocation of synchronous requests.
 	 */
-	data->shallow_depth = dd->async_depth;
+	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
 }
 
 /* Called by blk_mq_update_nr_requests(). */
@@ -513,9 +527,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
 
-	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+	dd->async_depth = q->nr_requests;
 
-	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
 }
 
 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
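For illustration, the scaling performed by the new dd_to_word_depth() helper can be reproduced in a stand-alone user-space sketch. The helper name to_word_depth() and the example values (sb.shift == 6, i.e. 64 bits per sbitmap word, and nr_requests == 256) are assumptions made for this example, not part of the patch.

#include <stdio.h>

/*
 * Sketch of the rounding-up scaling in dd_to_word_depth(): a queue-wide
 * depth in 1..nr_requests is mapped onto the 1..(1 << shift) range that
 * sbitmap_get_shallow() expects for a single sbitmap word.
 */
static unsigned int to_word_depth(unsigned int qdepth, unsigned int shift,
				  unsigned int nr_requests)
{
	/* Round up so that a non-zero qdepth never maps to a depth of zero. */
	return ((qdepth << shift) + nr_requests - 1) / nr_requests;
}

int main(void)
{
	const unsigned int shift = 6;		/* assumed: 64 bits per word */
	const unsigned int nr_requests = 256;	/* assumed queue depth */

	/* 3/4 of the queue depth maps to 3/4 of one word: prints 48. */
	printf("%u\n", to_word_depth(192, shift, nr_requests));
	/* qdepth == nr_requests saturates at the full word: prints 64. */
	printf("%u\n", to_word_depth(256, shift, nr_requests));
	return 0;
}

With these assumed values, the rounding-up division ensures that any depth of at least one request still allows at least one bit per word, which is why dd_depth_updated() can now pass 1 to sbitmap_queue_min_shallow_depth().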