Skip to content

Commit c98cb5b

Browse files
committed
block: make bio_queue_enter() fast-path available inline
Just a prep patch for shifting the queue enter logic. This moves the expected fast path inline, and leaves __bio_queue_enter() as an out-of-line function call. We don't want to inline the latter, as it's mostly slow path code.

Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 7153971 commit c98cb5b

File tree

2 files changed

+35
-27
lines changed

2 files changed

+35
-27
lines changed

block/blk-core.c

Lines changed: 1 addition & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
386386
}
387387
EXPORT_SYMBOL(blk_cleanup_queue);
388388

389-
/*
 * Try to take a usage reference on @q so the caller may enter the queue.
 *
 * Returns true with q->q_usage_counter elevated on success.  Returns false
 * (no reference held) when the usage counter is no longer live — presumably
 * the queue is frozen or being torn down — or when the queue is in pm-only
 * mode and this is not a runtime-resume request (@pm false, or the device
 * is suspended).  Runs entirely under rcu_read_lock().
 */
static bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	/* Drop the reference taken above before reporting failure. */
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}
412-
413389
/**
414390
* blk_queue_enter() - try to increase q->q_usage_counter
415391
* @q: request queue pointer
@@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
442418
return 0;
443419
}
444420

445-
static inline int bio_queue_enter(struct bio *bio)
421+
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
446422
{
447-
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
448-
449423
while (!blk_try_enter_queue(q, false)) {
450424
struct gendisk *disk = bio->bi_bdev->bd_disk;
451425

block/blk.h

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,40 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
5555
void blk_freeze_queue(struct request_queue *q);
5656
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
5757
void blk_queue_start_drain(struct request_queue *q);
58+
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
59+
60+
/*
 * Try to take a usage reference on @q so the caller may enter the queue.
 *
 * Returns true with q->q_usage_counter elevated on success.  Returns false
 * (no reference held) when the usage counter is no longer live — presumably
 * the queue is frozen or being torn down — or when the queue is in pm-only
 * mode and this is not a runtime-resume request (@pm false, or the device
 * is suspended).  Runs entirely under rcu_read_lock().
 *
 * Inlined in blk.h so the expected fast path of bio_queue_enter() avoids a
 * function call.
 */
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	/* Drop the reference taken above before reporting failure. */
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}
83+
84+
static inline int bio_queue_enter(struct bio *bio)
85+
{
86+
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
87+
88+
if (blk_try_enter_queue(q, false))
89+
return 0;
90+
return __bio_queue_enter(q, bio);
91+
}
5892

5993
#define BIO_INLINE_VECS 4
6094
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,

Comments (0)