Skip to content

Commit fd2ef39

Browse files
jankara authored and axboe committed
blk: Fix lock inversion between ioc lock and bfqd lock
Lockdep complains about lock inversion between ioc->lock and bfqd->lock:

bfqd -> ioc:
 put_io_context+0x33/0x90 -> ioc->lock grabbed
 blk_mq_free_request+0x51/0x140
 blk_put_request+0xe/0x10
 blk_attempt_req_merge+0x1d/0x30
 elv_attempt_insert_merge+0x56/0xa0
 blk_mq_sched_try_insert_merge+0x4b/0x60
 bfq_insert_requests+0x9e/0x18c0 -> bfqd->lock grabbed
 blk_mq_sched_insert_requests+0xd6/0x2b0
 blk_mq_flush_plug_list+0x154/0x280
 blk_finish_plug+0x40/0x60
 ext4_writepages+0x696/0x1320
 do_writepages+0x1c/0x80
 __filemap_fdatawrite_range+0xd7/0x120
 sync_file_range+0xac/0xf0

ioc->bfqd:
 bfq_exit_icq+0xa3/0xe0 -> bfqd->lock grabbed
 put_io_context_active+0x78/0xb0 -> ioc->lock grabbed
 exit_io_context+0x48/0x50
 do_exit+0x7e9/0xdd0
 do_group_exit+0x54/0xc0

To avoid this inversion we change blk_mq_sched_try_insert_merge() to not
free the merged request but rather leave that up to the caller similarly
to blk_mq_sched_try_merge(). And in bfq_insert_requests() we make sure
to free all the merged requests after dropping bfqd->lock.

Fixes: aee69d7 ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Reviewed-by: Ming Lei <[email protected]>
Acked-by: Paolo Valente <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent a921c65 commit fd2ef39

File tree

9 files changed

+43
-22
lines changed

9 files changed

+43
-22
lines changed

block/bfq-iosched.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2345,9 +2345,9 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
23452345

23462346
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
23472347

2348+
spin_unlock_irq(&bfqd->lock);
23482349
if (free)
23492350
blk_mq_free_request(free);
2350-
spin_unlock_irq(&bfqd->lock);
23512351

23522352
return ret;
23532353
}
@@ -5969,14 +5969,16 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
59695969
struct bfq_queue *bfqq;
59705970
bool idle_timer_disabled = false;
59715971
unsigned int cmd_flags;
5972+
LIST_HEAD(free);
59725973

59735974
#ifdef CONFIG_BFQ_GROUP_IOSCHED
59745975
if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
59755976
bfqg_stats_update_legacy_io(q, rq);
59765977
#endif
59775978
spin_lock_irq(&bfqd->lock);
5978-
if (blk_mq_sched_try_insert_merge(q, rq)) {
5979+
if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
59795980
spin_unlock_irq(&bfqd->lock);
5981+
blk_mq_free_requests(&free);
59805982
return;
59815983
}
59825984

block/blk-merge.c

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -846,18 +846,15 @@ static struct request *attempt_front_merge(struct request_queue *q,
846846
return NULL;
847847
}
848848

849-
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
850-
struct request *next)
849+
/*
850+
* Try to merge 'next' into 'rq'. Return true if the merge happened, false
851+
* otherwise. The caller is responsible for freeing 'next' if the merge
852+
* happened.
853+
*/
854+
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
855+
struct request *next)
851856
{
852-
struct request *free;
853-
854-
free = attempt_merge(q, rq, next);
855-
if (free) {
856-
blk_put_request(free);
857-
return 1;
858-
}
859-
860-
return 0;
857+
return attempt_merge(q, rq, next);
861858
}
862859

863860
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)

block/blk-mq-sched.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -399,9 +399,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
399399
return ret;
400400
}
401401

402-
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
402+
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
403+
struct list_head *free)
403404
{
404-
return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
405+
return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
405406
}
406407
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
407408

block/blk-mq-sched.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1313
unsigned int nr_segs, struct request **merged_request);
1414
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
1515
unsigned int nr_segs);
16-
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
16+
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
17+
struct list_head *free);
1718
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
1819
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
1920

block/blk-mq.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -302,6 +302,17 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
302302
return NULL;
303303
}
304304

305+
/* Free all requests on the list */
306+
static inline void blk_mq_free_requests(struct list_head *list)
307+
{
308+
while (!list_empty(list)) {
309+
struct request *rq = list_entry_rq(list->next);
310+
311+
list_del_init(&rq->queuelist);
312+
blk_mq_free_request(rq);
313+
}
314+
}
315+
305316
/*
306317
* For shared tag users, we track the number of currently active users
307318
* and attempt to provide a fair share of the tag depth for each of them.

block/blk.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,7 @@ ssize_t part_timeout_store(struct device *, struct device_attribute *,
224224
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
225225
int ll_back_merge_fn(struct request *req, struct bio *bio,
226226
unsigned int nr_segs);
227-
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
227+
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
228228
struct request *next);
229229
unsigned int blk_recalc_rq_segments(struct request *rq);
230230
void blk_rq_set_mixed_merge(struct request *rq);

block/elevator.c

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -350,9 +350,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
350350
* we can append 'rq' to an existing request, so we can throw 'rq' away
351351
* afterwards.
352352
*
353-
* Returns true if we merged, false otherwise
353+
* Returns true if we merged, false otherwise. 'free' will contain all
354+
* requests that need to be freed.
354355
*/
355-
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
356+
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
357+
struct list_head *free)
356358
{
357359
struct request *__rq;
358360
bool ret;
@@ -363,8 +365,10 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
363365
/*
364366
* First try one-hit cache.
365367
*/
366-
if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
368+
if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
369+
list_add(&rq->queuelist, free);
367370
return true;
371+
}
368372

369373
if (blk_queue_noxmerges(q))
370374
return false;
@@ -378,6 +382,7 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
378382
if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
379383
break;
380384

385+
list_add(&rq->queuelist, free);
381386
/* The merged request could be merged with others, try again */
382387
ret = true;
383388
rq = __rq;

block/mq-deadline-main.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -719,6 +719,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
719719
struct dd_per_prio *per_prio;
720720
enum dd_prio prio;
721721
struct dd_blkcg *blkcg;
722+
LIST_HEAD(free);
722723

723724
lockdep_assert_held(&dd->lock);
724725

@@ -742,8 +743,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
742743
WARN_ON_ONCE(rq->elv.priv[0]);
743744
rq->elv.priv[0] = blkcg;
744745

745-
if (blk_mq_sched_try_insert_merge(q, rq))
746+
if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
747+
blk_mq_free_requests(&free);
746748
return;
749+
}
747750

748751
trace_block_rq_insert(rq);
749752

include/linux/elevator.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,8 @@ extern void elv_merge_requests(struct request_queue *, struct request *,
117117
struct request *);
118118
extern void elv_merged_request(struct request_queue *, struct request *,
119119
enum elv_merge);
120-
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
120+
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *,
121+
struct list_head *);
121122
extern struct request *elv_former_request(struct request_queue *, struct request *);
122123
extern struct request *elv_latter_request(struct request_queue *, struct request *);
123124
void elevator_init_mq(struct request_queue *q);

0 commit comments

Comments
 (0)