blk: Fix lock inversion between ioc lock and bfqd lock
Lockdep complains about lock inversion between ioc->lock and bfqd->lock:

bfqd -> ioc:
 put_io_context+0x33/0x90 -> ioc->lock grabbed
 blk_mq_free_request+0x51/0x140
 blk_put_request+0xe/0x10
 blk_attempt_req_merge+0x1d/0x30
 elv_attempt_insert_merge+0x56/0xa0
 blk_mq_sched_try_insert_merge+0x4b/0x60
 bfq_insert_requests+0x9e/0x18c0 -> bfqd->lock grabbed
 blk_mq_sched_insert_requests+0xd6/0x2b0
 blk_mq_flush_plug_list+0x154/0x280
 blk_finish_plug+0x40/0x60
 ext4_writepages+0x696/0x1320
 do_writepages+0x1c/0x80
 __filemap_fdatawrite_range+0xd7/0x120
 sync_file_range+0xac/0xf0

ioc->bfqd:
 bfq_exit_icq+0xa3/0xe0 -> bfqd->lock grabbed
 put_io_context_active+0x78/0xb0 -> ioc->lock grabbed
 exit_io_context+0x48/0x50
 do_exit+0x7e9/0xdd0
 do_group_exit+0x54/0xc0

To avoid this inversion we change blk_mq_sched_try_insert_merge() to not
free the merged request but rather leave that up to the caller, similarly
to blk_mq_sched_try_merge(). And in bfq_insert_requests() we make sure to
free all the merged requests after dropping bfqd->lock.

Fixes: aee69d78dec0 ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20210623093634.27879-3-jack@suse.cz
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit fd2ef39cc9a6b9c4c41864ac506906c52f94b06a)
(cherry picked from commit 786e392c4a7bd2559bdc1a1c6ac28d8b612a0735)
(cherry picked from commit aa8e3e1451bde73dff60f1e5110b6a3cb810e35b)
(cherry picked from commit 4deef6abb13a82b148c583d9ab37374c876fe4c2)
(cherry picked from commit 1988f864ec1c494bb54e5b9df1611195f6d923f2)
(cherry picked from commit 9dc0074b0dd8960f9e06dc1494855493ff53eb68)
(cherry picked from commit c937983724111bb4526e34da0d5c6c8aea1902af)
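For context on the shape of the fix: merged requests are now collected on a caller-provided list while the scheduler lock is held, and they are freed only after that lock has been dropped, so the freeing path (which takes ioc->lock) no longer nests inside bfqd->lock. Below is a minimal, self-contained user-space sketch of that deferral pattern; the names (struct req, try_insert_merge, insert_request, free_requests) are hypothetical illustrations of the idea, not the kernel functions changed by this commit.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical request type, used only for this illustration. */
struct req {
        struct req *next;
};

/* Stands in for the scheduler's internal lock (bfqd->lock / dd->lock). */
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Freeing a request may need to take other locks (ioc->lock in the real
 * code), so it must only run after sched_lock has been released.
 */
static void free_requests(struct req *list)
{
        while (list) {
                struct req *rq = list;

                list = rq->next;
                free(rq);
        }
}

/*
 * Stand-in for the merge attempt: instead of freeing the merged request
 * while the scheduler lock is still held, push it onto the caller's list.
 */
static int try_insert_merge(struct req *rq, struct req **free_list)
{
        /* Merge logic elided; pretend 'rq' was merged into another request. */
        rq->next = *free_list;
        *free_list = rq;
        return 1;
}

static void insert_request(struct req *rq)
{
        struct req *free_list = NULL;

        pthread_mutex_lock(&sched_lock);
        if (try_insert_merge(rq, &free_list)) {
                pthread_mutex_unlock(&sched_lock);
                free_requests(free_list);       /* freed only after unlock */
                return;
        }
        /* ... normal insertion would happen here ... */
        pthread_mutex_unlock(&sched_lock);
}

int main(void)
{
        struct req *rq = calloc(1, sizeof(*rq));

        insert_request(rq);
        return 0;
}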
parent 90c0c9aa4a
commit c72c9473f5

9 changed files with 43 additions and 22 deletions
@@ -2250,9 +2250,9 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
 
         ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
 
+        spin_unlock_irq(&bfqd->lock);
         if (free)
                 blk_mq_free_request(free);
-        spin_unlock_irq(&bfqd->lock);
 
         return ret;
 }
@@ -5524,6 +5524,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
         struct bfq_queue *bfqq;
         bool idle_timer_disabled = false;
         unsigned int cmd_flags;
+        LIST_HEAD(free);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
         if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
@@ -5531,8 +5532,9 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 #endif
         spin_lock_irq(&bfqd->lock);
         bfqq = bfq_init_rq(rq);
-        if (blk_mq_sched_try_insert_merge(q, rq)) {
+        if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
                 spin_unlock_irq(&bfqd->lock);
+                blk_mq_free_requests(&free);
                 return;
         }
 
@@ -842,18 +842,15 @@ static struct request *attempt_front_merge(struct request_queue *q,
         return NULL;
 }
 
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
-                          struct request *next)
+/*
+ * Try to merge 'next' into 'rq'. Return true if the merge happened, false
+ * otherwise. The caller is responsible for freeing 'next' if the merge
+ * happened.
+ */
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+                           struct request *next)
 {
-        struct request *free;
-
-        free = attempt_merge(q, rq, next);
-        if (free) {
-                blk_put_request(free);
-                return 1;
-        }
-
-        return 0;
+        return attempt_merge(q, rq, next);
 }
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -386,9 +386,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
         return ret;
 }
 
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+                                   struct list_head *free)
 {
-        return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
+        return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
 
@@ -12,7 +12,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                 unsigned int nr_segs, struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                 unsigned int nr_segs);
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+                                   struct list_head *free);
 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
@@ -284,6 +284,17 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
         return NULL;
 }
 
+/* Free all requests on the list */
+static inline void blk_mq_free_requests(struct list_head *list)
+{
+        while (!list_empty(list)) {
+                struct request *rq = list_entry_rq(list->next);
+
+                list_del_init(&rq->queuelist);
+                blk_mq_free_request(rq);
+        }
+}
+
 /*
  * For shared tag users, we track the number of currently active users
  * and attempt to provide a fair share of the tag depth for each of them.
@@ -236,7 +236,7 @@ ssize_t part_timeout_store(struct device *, struct device_attribute *,
 void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
 int ll_back_merge_fn(struct request *req, struct bio *bio,
                 unsigned int nr_segs);
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                                 struct request *next);
 unsigned int blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
@@ -354,9 +354,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
  * we can append 'rq' to an existing request, so we can throw 'rq' away
  * afterwards.
  *
- * Returns true if we merged, false otherwise
+ * Returns true if we merged, false otherwise. 'free' will contain all
+ * requests that need to be freed.
  */
-bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
+bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
+                              struct list_head *free)
 {
         struct request *__rq;
         bool ret;
@@ -367,8 +369,10 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
         /*
          * First try one-hit cache.
          */
-        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
+                list_add(&rq->queuelist, free);
                 return true;
+        }
 
         if (blk_queue_noxmerges(q))
                 return false;
@@ -382,6 +386,7 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
                 if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                         break;
 
+                list_add(&rq->queuelist, free);
                 /* The merged request could be merged with others, try again */
                 ret = true;
                 rq = __rq;
@@ -719,6 +719,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
         struct dd_per_prio *per_prio;
         enum dd_prio prio;
         struct dd_blkcg *blkcg;
+        LIST_HEAD(free);
 
         lockdep_assert_held(&dd->lock);
 
@@ -741,8 +742,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
         ddcg_count(blkcg, inserted, ioprio_class);
         rq->elv.priv[0] = blkcg;
 
-        if (blk_mq_sched_try_insert_merge(q, rq))
+        if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
+                blk_mq_free_requests(&free);
                 return;
+        }
 
         blk_mq_sched_request_inserted(rq);
 
@@ -126,7 +126,8 @@ extern void elv_merge_requests(struct request_queue *, struct request *,
                 struct request *);
 extern void elv_merged_request(struct request_queue *, struct request *,
                 enum elv_merge);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
+extern bool elv_attempt_insert_merge(struct request_queue *, struct request *,
+                struct list_head *);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 