Commit 9d497e29 authored by Ming Lei, committed by Jens Axboe

block: don't protect submit_bio_checks by q_usage_counter

Commit cc9c884d ("block: call submit_bio_checks under q_usage_counter")
uses q_usage_counter to protect submit_bio_checks(), so that no IO can be
submitted after the disk has been deleted by del_gendisk().

It turns out the protection isn't necessary, because once
blk_mq_freeze_queue_wait() in del_gendisk() returns:

1) all in-flight IO has completed

2) all new IO will be failed in __bio_queue_enter(), because
   q_usage_counter is dead and GD_DEAD is set (see the sketch below)

3) both the disk and the request queue instance stay valid, since the
   caller of submit_bio() guarantees that the disk can't be closed.
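
A minimal sketch of point 2), assuming the behaviour of __bio_queue_enter()
in block/blk-core.c around the time of this patch; the helper below is a
simplified illustration (RCU and runtime-PM details are omitted), not the
kernel source:

/*
 * Simplified illustration of how __bio_queue_enter() fails new IO once
 * del_gendisk() has frozen the queue and set GD_DEAD.
 */
static int bio_queue_enter_sketch(struct request_queue *q, struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	while (!percpu_ref_tryget_live(&q->q_usage_counter)) {
		/* Queue is frozen or its usage counter is dead. */
		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EBUSY;
		}
		/* Otherwise sleep until unfrozen or the disk dies. */
		wait_event(q->mq_freeze_wq,
			   !q->mq_freeze_depth ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}
	return 0;

dead:
	/* del_gendisk() already ran: complete the bio with an error. */
	bio_io_error(bio);
	return -ENODEV;
}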

Since submit_bio_checks() doesn't need the protection of q_usage_counter, we
can move it before calling blk_mq_submit_bio() and ->submit_bio(). With this
change we no longer need to throttle the queue while holding an allocated
request, so a precious driver tag or request isn't wasted on throttling.
Meanwhile the bio checks can be unified for both bio-based and request-based
drivers.
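
For reference, a condensed outline of the resulting blk-mq submission order;
the wrapper name below is made up for illustration only, and the real flow
lives in blk_mq_submit_bio() and its callees, as the diff below shows:

/*
 * Illustrative outline, not a literal copy of blk_mq_submit_bio():
 * checks, merge attempt and throttling all run before any request or
 * driver tag is allocated.
 */
static void blk_mq_submit_order_sketch(struct request_queue *q,
				       struct blk_plug *plug,
				       struct bio *bio,
				       unsigned int nr_segs)
{
	struct request *rq;

	/* submit_bio_checks() already ran in __submit_bio(). */

	/* Merge attempt and rq_qos throttling happen first ... */
	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
		return;
	rq_qos_throttle(q, bio);

	/* ... so no allocated request is held while throttling. */
	rq = blk_mq_get_cached_request(q, plug, bio);
	if (!rq)
		rq = blk_mq_get_new_requests(q, plug, bio);
	if (unlikely(!rq))
		return;

	/* Request initialisation and issue continue as before. */
}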

Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220104134223.590803-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 292c33c9
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -787,17 +787,21 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
 
 static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
 {
-	if (unlikely(bio_queue_enter(bio) != 0))
-		return;
-	if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
-		disk->fops->submit_bio(bio);
-	blk_queue_exit(disk->queue);
+	if (blk_crypto_bio_prep(&bio)) {
+		if (likely(bio_queue_enter(bio) == 0)) {
+			disk->fops->submit_bio(bio);
+			blk_queue_exit(disk->queue);
+		}
+	}
 }
 
 static void __submit_bio(struct bio *bio)
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 
+	if (unlikely(!submit_bio_checks(bio)))
+		return;
+
 	if (!disk->fops->submit_bio)
 		blk_mq_submit_bio(bio);
 	else
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2714,26 +2714,18 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
 
 static struct request *blk_mq_get_new_requests(struct request_queue *q,
 					       struct blk_plug *plug,
-					       struct bio *bio,
-					       unsigned int nsegs)
+					       struct bio *bio)
 {
 	struct blk_mq_alloc_data data = {
 		.q		= q,
 		.nr_tags	= 1,
+		.cmd_flags	= bio->bi_opf,
 	};
 	struct request *rq;
 
 	if (unlikely(bio_queue_enter(bio)))
 		return NULL;
-	if (unlikely(!submit_bio_checks(bio)))
-		goto queue_exit;
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-		goto queue_exit;
-
-	rq_qos_throttle(q, bio);
-
-	/* ->bi_opf is finalized after submit_bio_checks() returns */
-	data.cmd_flags	= bio->bi_opf;
+
 	if (plug) {
 		data.nr_tags	= plug->nr_ios;
 		plug->nr_ios	= 1;
@@ -2746,13 +2738,12 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-queue_exit:
 	blk_queue_exit(q);
 	return NULL;
 }
 
 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+		struct blk_plug *plug, struct bio *bio)
 {
 	struct request *rq;
 
@@ -2762,21 +2753,14 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 	if (!rq || rq->q != q)
 		return NULL;
 
-	if (unlikely(!submit_bio_checks(*bio)))
-		return NULL;
-	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-		*bio = NULL;
+	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
 		return NULL;
-	}
-	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
-		return NULL;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
 		return NULL;
 
-	rq->cmd_flags = (*bio)->bi_opf;
+	rq->cmd_flags = bio->bi_opf;
 	plug->cached_rq = rq_list_next(rq);
 	INIT_LIST_HEAD(&rq->queuelist);
-	rq_qos_throttle(q, *bio);
 	return rq;
 }
 
@@ -2812,11 +2796,14 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio_integrity_prep(bio))
 		return;
 
-	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
+	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+		return;
+
+	rq_qos_throttle(q, bio);
+
+	rq = blk_mq_get_cached_request(q, plug, bio);
 	if (!rq) {
-		if (!bio)
-			return;
-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		rq = blk_mq_get_new_requests(q, plug, bio);
 		if (unlikely(!rq))
 			return;
 	}