
Commit 30444ea

leitao authored and kawasaki committed
blk-rq-qos: Remove unlikely() hints from QoS checks
The unlikely() annotations on the QUEUE_FLAG_QOS_ENABLED checks are counterproductive. Writeback throttling (WBT) is enabled by default, since CONFIG_BLK_WBT_MQ defaults to 'y', so these branches are in fact frequently taken. Branch profiling on Meta servers, which run with WBT enabled, confirms a 100% misprediction rate on these checks.

Remove the unlikely() annotations and let the CPU's branch predictor learn the actual behavior, potentially improving I/O path performance.

Signed-off-by: Breno Leitao <[email protected]>
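For context (not part of the commit): unlikely() is the kernel's wrapper around GCC's __builtin_expect(), defined in include/linux/compiler.h. Below is a minimal userspace sketch of the pattern the patch removes; qos_enabled is a hypothetical stand-in for the queue-flag test.

#include <stdio.h>

/* Userspace mirror of the kernel's branch-hint macros from
 * include/linux/compiler.h (illustrative sketch, not kernel code). */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Stand-in for test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags);
 * with CONFIG_BLK_WBT_MQ=y this condition is usually true. */
static int qos_enabled = 1;

int main(void)
{
        /* Old pattern: unlikely() tells the compiler to lay this body out
         * as the cold path, which is the wrong static prediction whenever
         * QoS is actually enabled. */
        if (unlikely(qos_enabled))
                puts("QoS path taken despite the unlikely() hint");

        /* New pattern: no static hint, so code layout stays neutral and
         * the CPU's dynamic branch predictor learns the real outcome. */
        if (qos_enabled)
                puts("QoS path taken with no static hint");

        return 0;
}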
1 parent f962a4d · commit 30444ea


block/blk-rq-qos.h

Lines changed: 9 additions & 16 deletions
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
 
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_cleanup(q->rq_qos, bio);
 }
 
 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos && !blk_rq_is_passthrough(rq))
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
+	    q->rq_qos && !blk_rq_is_passthrough(rq))
 		__rq_qos_done(q->rq_qos, rq);
 }
 
 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_issue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_requeue(q->rq_qos, rq);
 }
 
@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
 
 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_THROTTLED);
 		__rq_qos_throttle(q->rq_qos, bio);
 	}
@@ -172,25 +168,22 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_track(q->rq_qos, rq, bio);
 }
 
 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_MERGED);
 		__rq_qos_merge(q->rq_qos, rq, bio);
 	}
 }
 
 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_queue_depth_changed(q->rq_qos);
 }
 
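The misprediction figure cited in the commit message is the kind of data the kernel's annotated-branch profiler provides: with CONFIG_PROFILE_ANNOTATED_BRANCHES=y, it exposes per-site hit/miss counters for every likely()/unlikely() annotation under debugfs. A minimal reader, as a sketch assuming that config option and the usual debugfs mount point (whether Meta's profiling used exactly this mechanism is an assumption):

#include <stdio.h>

int main(void)
{
        /* Per-site statistics for every likely()/unlikely() annotation;
         * requires CONFIG_PROFILE_ANNOTATED_BRANCHES=y and debugfs
         * mounted at /sys/kernel/debug (an assumption about the setup). */
        const char *path =
                "/sys/kernel/debug/tracing/trace_stat/branch_annotated";
        FILE *f = fopen(path, "r");
        char line[256];

        if (!f) {
                perror(path);
                return 1;
        }
        /* Each row lists correct/incorrect prediction counts plus the
         * function, file, and line of the annotation, so a hint that is
         * ~100% wrong (as for these QoS checks) stands out directly. */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}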