Skip to content

Commit 5421681

Browse files
YuKuai-huawei and axboe authored and committed
blk-ioc: don't hold queue_lock for ioc_lookup_icq()
Currently, issuing IO can grab queue_lock three times — from bfq_bio_merge(), bfq_limit_depth() and bfq_prepare_request(). The queue_lock is not necessary if the icq is already created, because neither the queue nor the ioc can be freed before IO issuing is done; hence remove the unnecessary queue_lock and use RCU to protect the radix tree lookup.

Note that this is also a prep patch to support request batch dispatching [1].

[1] https://lore.kernel.org/all/[email protected]/

Signed-off-by: Yu Kuai <[email protected]>
Reviewed-by: Damien Le Moal <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 1da67b5 commit 5421681

File tree

2 files changed

+8
-26
lines changed

2 files changed

+8
-26
lines changed

block/bfq-iosched.c

Lines changed: 2 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -454,17 +454,10 @@ static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
454454
*/
455455
static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
456456
{
457-
struct bfq_io_cq *icq;
458-
unsigned long flags;
459-
460457
if (!current->io_context)
461458
return NULL;
462459

463-
spin_lock_irqsave(&q->queue_lock, flags);
464-
icq = icq_to_bic(ioc_lookup_icq(q));
465-
spin_unlock_irqrestore(&q->queue_lock, flags);
466-
467-
return icq;
460+
return icq_to_bic(ioc_lookup_icq(q));
468461
}
469462

470463
/*
@@ -2457,15 +2450,8 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
24572450
unsigned int nr_segs)
24582451
{
24592452
struct bfq_data *bfqd = q->elevator->elevator_data;
2460-
struct request *free = NULL;
2461-
/*
2462-
* bfq_bic_lookup grabs the queue_lock: invoke it now and
2463-
* store its return value for later use, to avoid nesting
2464-
* queue_lock inside the bfqd->lock. We assume that the bic
2465-
* returned by bfq_bic_lookup does not go away before
2466-
* bfqd->lock is taken.
2467-
*/
24682453
struct bfq_io_cq *bic = bfq_bic_lookup(q);
2454+
struct request *free = NULL;
24692455
bool ret;
24702456

24712457
spin_lock_irq(&bfqd->lock);

block/blk-ioc.c

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -308,24 +308,23 @@ int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
308308

309309
#ifdef CONFIG_BLK_ICQ
310310
/**
311-
* ioc_lookup_icq - lookup io_cq from ioc
311+
* ioc_lookup_icq - lookup io_cq from ioc in io issue path
312312
* @q: the associated request_queue
313313
*
314314
* Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
315-
* with @q->queue_lock held.
315+
* from io issue path, either return NULL if current issue io to @q for the
316+
* first time, or return a valid icq.
316317
*/
317318
struct io_cq *ioc_lookup_icq(struct request_queue *q)
318319
{
319320
struct io_context *ioc = current->io_context;
320321
struct io_cq *icq;
321322

322-
lockdep_assert_held(&q->queue_lock);
323-
324323
/*
325324
* icq's are indexed from @ioc using radix tree and hint pointer,
326-
* both of which are protected with RCU. All removals are done
327-
* holding both q and ioc locks, and we're holding q lock - if we
328-
* find a icq which points to us, it's guaranteed to be valid.
325+
* both of which are protected with RCU, io issue path ensures that
326+
* both request_queue and current task are valid, the found icq
327+
* is guaranteed to be valid until the io is done.
329328
*/
330329
rcu_read_lock();
331330
icq = rcu_dereference(ioc->icq_hint);
@@ -419,10 +418,7 @@ struct io_cq *ioc_find_get_icq(struct request_queue *q)
419418
task_unlock(current);
420419
} else {
421420
get_io_context(ioc);
422-
423-
spin_lock_irq(&q->queue_lock);
424421
icq = ioc_lookup_icq(q);
425-
spin_unlock_irq(&q->queue_lock);
426422
}
427423

428424
if (!icq) {

0 commit comments

Comments (0)