
Commit 42e6c6c

YuKuai-huawei authored and axboe committed
lib/sbitmap: convert shallow_depth from one word to the whole sbitmap
Currently, elevators record an internal 'async_depth' to throttle asynchronous requests, and they all calculate shallow_depth based on sb->shift, on the assumption that sb->shift gives the number of available tags in one word. However, sb->shift is not the number of available tags in the last word; see __map_depth():

	if (index == sb->map_nr - 1)
		return sb->depth - (index << sb->shift);

As a consequence, if the last word is used, more tags can be allocated than expected. For example, assume nr_requests=256 spread over four words: in the worst case, if the user then sets nr_requests=32, the first word is also the last word, yet async_depth is still calculated from the full bits-per-word value of 64, which is wrong.

On the other hand, due to cgroup QoS, bfq may want to allow only one request to be allocated, but setting shallow_depth=1 will still allow one request per word (map_nr requests in total) to be allocated.

Fix these problems by applying shallow_depth to the whole sbitmap instead of to each word, and change kyber, mq-deadline and bfq accordingly. A new helper, __map_depth_with_shallow(), is introduced to calculate the available bits in each word.

Signed-off-by: Yu Kuai <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
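To make the failure mode concrete, here is a small standalone C sketch (illustrative only, not kernel code: struct fake_sb, its values, and the 75% throttle factor are assumptions chosen to mirror the nr_requests=32 example above, with 64 bits per word):

	#include <stdio.h>

	/* Simplified model of the sbitmap geometry (illustration only). */
	struct fake_sb {
		unsigned int depth;	/* total number of tags */
		unsigned int shift;	/* log2 of bits per full word */
		unsigned int map_nr;	/* number of words */
	};

	/* Mirrors __map_depth(): the last word may hold fewer than 1 << shift bits. */
	static unsigned int map_depth(const struct fake_sb *sb, unsigned int index)
	{
		if (index == sb->map_nr - 1)
			return sb->depth - (index << sb->shift);
		return 1U << sb->shift;
	}

	int main(void)
	{
		/* nr_requests lowered to 32 while shift stays 6 (64 bits per word). */
		struct fake_sb sb = { .depth = 32, .shift = 6, .map_nr = 1 };
		/* Old-style throttle derived from bits-per-word: 75% of 64 = 48... */
		unsigned int async_depth = (1U << sb.shift) * 3 / 4;

		/* ...but the only (and last) word really holds just 32 tags. */
		printf("async_depth = %u, actual word depth = %u\n",
		       async_depth, map_depth(&sb, 0));
		return 0;
	}

Here the computed limit of 48 exceeds the 32 tags that actually exist, so the throttle has no effect, which is the behaviour the patch fixes.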
1 parent 80f2180 commit 42e6c6c

6 files changed, +52 -73 lines changed

block/bfq-iosched.c

Lines changed: 16 additions & 19 deletions
@@ -694,17 +694,13 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 {
 	struct bfq_data *bfqd = data->q->elevator->elevator_data;
 	struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
-	int depth;
-	unsigned limit = data->q->nr_requests;
-	unsigned int act_idx;
+	unsigned int limit, act_idx;
 
 	/* Sync reads have full depth available */
-	if (op_is_sync(opf) && !op_is_write(opf)) {
-		depth = 0;
-	} else {
-		depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
-		limit = (limit * depth) >> bfqd->full_depth_shift;
-	}
+	if (op_is_sync(opf) && !op_is_write(opf))
+		limit = data->q->nr_requests;
+	else
+		limit = bfqd->async_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
 
 	for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
 		/* Fast path to check if bfqq is already allocated. */
@@ -718,14 +714,16 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 		 * available requests and thus starve other entities.
 		 */
 		if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) {
-			depth = 1;
+			limit = 1;
 			break;
 		}
 	}
+
 	bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
-			__func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
-	if (depth)
-		data->shallow_depth = depth;
+			__func__, bfqd->wr_busy_queues, op_is_sync(opf), limit);
+
+	if (limit < data->q->nr_requests)
+		data->shallow_depth = limit;
 }
 
 static struct bfq_queue *
@@ -7114,9 +7112,8 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
  */
 static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
 {
-	unsigned int depth = 1U << bt->sb.shift;
+	unsigned int nr_requests = bfqd->queue->nr_requests;
 
-	bfqd->full_depth_shift = bt->sb.shift;
 	/*
 	 * In-word depths if no bfq_queue is being weight-raised:
 	 * leaving 25% of tags only for sync reads.
@@ -7128,13 +7125,13 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
 	 * limit 'something'.
 	 */
 	/* no more than 50% of tags for async I/O */
-	bfqd->word_depths[0][0] = max(depth >> 1, 1U);
+	bfqd->async_depths[0][0] = max(nr_requests >> 1, 1U);
 	/*
 	 * no more than 75% of tags for sync writes (25% extra tags
 	 * w.r.t. async I/O, to prevent async I/O from starving sync
 	 * writes)
 	 */
-	bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
+	bfqd->async_depths[0][1] = max((nr_requests * 3) >> 2, 1U);
 
 	/*
 	 * In-word depths in case some bfq_queue is being weight-
@@ -7144,9 +7141,9 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
 	 * shortage.
 	 */
 	/* no more than ~18% of tags for async I/O */
-	bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
+	bfqd->async_depths[1][0] = max((nr_requests * 3) >> 4, 1U);
 	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
+	bfqd->async_depths[1][1] = max((nr_requests * 6) >> 4, 1U);
 }
 
 static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
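As a worked example of the new formulas, with nr_requests = 256 the limits above come out to async_depths of 128 (50%), 192 (75%), 48 (~18%) and 96 (~37%) requests respectively.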

block/bfq-iosched.h

Lines changed: 1 addition & 2 deletions
@@ -813,8 +813,7 @@ struct bfq_data {
 	 * Depth limits used in bfq_limit_depth (see comments on the
	 * function)
	 */
-	unsigned int word_depths[2][2];
-	unsigned int full_depth_shift;
+	unsigned int async_depths[2][2];
 
 	/*
	 * Number of independent actuators. This is equal to 1 in

block/kyber-iosched.c

Lines changed: 2 additions & 7 deletions
@@ -157,10 +157,7 @@ struct kyber_queue_data {
 	 */
 	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
 
-	/*
-	 * Async request percentage, converted to per-word depth for
-	 * sbitmap_get_shallow().
-	 */
+	/* Number of allowed async requests. */
 	unsigned int async_depth;
 
 	struct kyber_cpu_latency __percpu *cpu_latency;
@@ -447,10 +444,8 @@ static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
-	unsigned int shift = tags->bitmap_tags.sb.shift;
-
-	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
 
+	kqd->async_depth = hctx->queue->nr_requests * KYBER_ASYNC_PERCENT / 100U;
 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
 }
 
block/mq-deadline.c

Lines changed: 1 addition & 15 deletions
@@ -487,20 +487,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
-/*
- * 'depth' is a number in the range 1..INT_MAX representing a number of
- * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
- * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
- * Values larger than q->nr_requests have the same effect as q->nr_requests.
- */
-static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
-{
-	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
-	const unsigned int nrr = hctx->queue->nr_requests;
-
-	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
-}
-
 /*
  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
  * function is used by __blk_mq_get_tag().
@@ -517,7 +503,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
-	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
+	data->shallow_depth = dd->async_depth;
 }
 
 /* Called by blk_mq_update_nr_requests(). */

include/linux/sbitmap.h

Lines changed: 3 additions & 3 deletions
@@ -213,12 +213,12 @@ int sbitmap_get(struct sbitmap *sb);
  * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
  * limiting the depth used from each word.
  * @sb: Bitmap to allocate from.
- * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * @shallow_depth: The maximum number of bits to allocate from the bitmap.
  *
  * This rather specific operation allows for having multiple users with
  * different allocation limits. E.g., there can be a high-priority class that
  * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
- * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
+ * with a @shallow_depth of (sb->depth >> 1). Then, the low-priority
  * class can only allocate half of the total bits in the bitmap, preventing it
  * from starving out the high-priority class.
  *
@@ -478,7 +478,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
  * sbitmap_queue, limiting the depth used from each word, with preemption
  * already disabled.
  * @sbq: Bitmap queue to allocate from.
- * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * @shallow_depth: The maximum number of bits to allocate from the queue.
  * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after

lib/sbitmap.c

Lines changed: 29 additions & 27 deletions
@@ -208,8 +208,28 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
 	return nr;
 }
 
+static unsigned int __map_depth_with_shallow(const struct sbitmap *sb,
+					     int index,
+					     unsigned int shallow_depth)
+{
+	u64 shallow_word_depth;
+	unsigned int word_depth, reminder;
+
+	word_depth = __map_depth(sb, index);
+	if (shallow_depth >= sb->depth)
+		return word_depth;
+
+	shallow_word_depth = word_depth * shallow_depth;
+	reminder = do_div(shallow_word_depth, sb->depth);
+
+	if (reminder >= (index + 1) * word_depth)
+		shallow_word_depth++;
+
+	return (unsigned int)shallow_word_depth;
+}
+
 static int sbitmap_find_bit(struct sbitmap *sb,
-			    unsigned int depth,
+			    unsigned int shallow_depth,
 			    unsigned int index,
 			    unsigned int alloc_hint,
 			    bool wrap)
@@ -218,12 +238,12 @@ static int sbitmap_find_bit(struct sbitmap *sb,
 	int nr = -1;
 
 	for (i = 0; i < sb->map_nr; i++) {
-		nr = sbitmap_find_bit_in_word(&sb->map[index],
-					      min_t(unsigned int,
-						    __map_depth(sb, index),
-						    depth),
-					      alloc_hint, wrap);
+		unsigned int depth = __map_depth_with_shallow(sb, index,
+							      shallow_depth);
 
+		if (depth)
+			nr = sbitmap_find_bit_in_word(&sb->map[index], depth,
+						      alloc_hint, wrap);
 		if (nr != -1) {
 			nr += index << sb->shift;
 			break;
@@ -406,27 +426,9 @@ EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
 static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
 					unsigned int depth)
 {
-	unsigned int wake_batch;
-	unsigned int shallow_depth;
-
-	/*
-	 * Each full word of the bitmap has bits_per_word bits, and there might
-	 * be a partial word. There are depth / bits_per_word full words and
-	 * depth % bits_per_word bits left over. In bitwise arithmetic:
-	 *
-	 * bits_per_word = 1 << shift
-	 * depth / bits_per_word = depth >> shift
-	 * depth % bits_per_word = depth & ((1 << shift) - 1)
-	 *
-	 * Each word can be limited to sbq->min_shallow_depth bits.
-	 */
-	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
-	depth = ((depth >> sbq->sb.shift) * shallow_depth +
-		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
-	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
-			     SBQ_WAKE_BATCH);
-
-	return wake_batch;
+	return clamp_t(unsigned int,
+		       min(depth, sbq->min_shallow_depth) / SBQ_WAIT_QUEUES,
+		       1, SBQ_WAKE_BATCH);
 }
 
 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
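For illustration, the following minimal userspace sketch (not the kernel code itself: do_div() is replaced by plain 64-bit division and a fixed geometry of four full 64-bit words is assumed) mirrors the logic of __map_depth_with_shallow() and shows how a whole-bitmap shallow_depth is split into per-word limits:

	#include <stdio.h>
	#include <stdint.h>

	#define MAP_NR		4u	/* number of words */
	#define BITS_PER_WORD	64u	/* 1 << sb->shift */
	#define TOTAL_DEPTH	256u	/* sb->depth */

	/* Scale the whole-bitmap shallow_depth down to this word's share,
	 * handing the leftover bits to the lowest-index words. */
	static unsigned int map_depth_with_shallow(unsigned int index,
						   unsigned int shallow_depth)
	{
		uint64_t scaled;
		unsigned int word_depth = BITS_PER_WORD, rem;

		if (shallow_depth >= TOTAL_DEPTH)
			return word_depth;

		scaled = (uint64_t)word_depth * shallow_depth;
		rem = scaled % TOTAL_DEPTH;	/* do_div() in the kernel */
		scaled /= TOTAL_DEPTH;

		if (rem >= (index + 1) * word_depth)
			scaled++;

		return (unsigned int)scaled;
	}

	int main(void)
	{
		unsigned int shallow_depth = 50, total = 0, i;

		for (i = 0; i < MAP_NR; i++) {
			unsigned int d = map_depth_with_shallow(i, shallow_depth);

			printf("word %u: up to %u bits\n", i, d);
			total += d;
		}
		printf("total = %u (shallow_depth = %u)\n", total, shallow_depth);
		return 0;
	}

With shallow_depth = 50 this prints per-word limits of 13, 13, 12 and 12, which sum to 50 for this geometry, so a shallow user can no longer draw a full per-word quota from every word and exceed its overall budget.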
