Commit d92ca9d8 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: don't handle non-flush requests in blk_insert_flush

Return to the normal blk_mq_submit_bio flow if the bio did not end up
actually being a flush because the device didn't support it.  Note that
this is basically impossible to hit without special instrumentation, given
that submit_bio_checks usually already clears these flags, so we'd need a
tight race to actually hit this code path.

With this, the call to blk_mq_run_hw_queue for flush requests can be
removed, given that the actual flush requests are always issued via the
requeue workqueue, which runs the queue unconditionally.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211019122553.2467817-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dc5fc361
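For readers following the control-flow change, here is a small standalone sketch of the new calling convention, built only from what the diff below shows. The simplified struct request, the REQ_FSEQ_* defines, the submit() helper and the main() driver are illustrative stand-ins, not kernel code.

/*
 * Toy model (not kernel code): blk_insert_flush() now returns true when it
 * has consumed the request (completed it or fed it to the flush state
 * machine) and false when the request needs no flush handling, in which
 * case the caller continues with the normal insert path.
 */
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the flush policy bits used in the real code. */
#define REQ_FSEQ_DATA      (1u << 0)
#define REQ_FSEQ_PREFLUSH  (1u << 1)
#define REQ_FSEQ_POSTFLUSH (1u << 2)

struct request {
	unsigned int policy;	/* stand-in for the computed flush policy */
};

static bool blk_insert_flush(struct request *rq)
{
	unsigned int policy = rq->policy;

	if (!policy) {
		/* Nothing to do: complete the request right away. */
		printf("completed empty flush immediately\n");
		return true;
	}

	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		/* Plain data request: hand it back to the normal submit path. */
		return false;
	}

	/* Would enter the flush machinery via blk_flush_complete_seq(). */
	printf("queued on the flush state machine\n");
	return true;
}

/* Caller pattern mirroring blk_mq_submit_bio() after this change. */
static void submit(struct request *rq, bool is_flush_fua)
{
	if (is_flush_fua && blk_insert_flush(rq))
		return;
	printf("normal insert path (plug or scheduler)\n");
}

int main(void)
{
	struct request data_only = { .policy = REQ_FSEQ_DATA };
	struct request preflush  = { .policy = REQ_FSEQ_DATA | REQ_FSEQ_PREFLUSH };

	submit(&data_only, true);	/* flush flags stripped: normal path   */
	submit(&preflush, true);	/* real preflush: flush machinery wins */
	return 0;
}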
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
  */
-void blk_insert_flush(struct request *rq)
+bool blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned long fflags = q->queue_flags;	/* may change, cache */
@@ -409,7 +409,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if (!policy) {
 		blk_mq_end_request(rq, 0);
-		return;
+		return true;
 	}
 
 	BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,10 +420,8 @@ void blk_insert_flush(struct request *rq)
 	 * for normal execution.
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
-	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false, false);
-		return;
-	}
+	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
+		return false;
 
 	/*
 	 * @rq should go through flush machinery.  Mark it part of flush
@@ -439,6 +437,8 @@ void blk_insert_flush(struct request *rq)
 	spin_lock_irq(&fq->mq_flush_lock);
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 	spin_unlock_irq(&fq->mq_flush_lock);
+
+	return true;
 }
 
 /**
...
@@ -2532,12 +2532,10 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (unlikely(is_flush_fua)) {
-		struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-		/* Bypass scheduler for flush requests */
-		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(hctx, true);
-	} else if (plug && (q->nr_hw_queues == 1 ||
+	if (is_flush_fua && blk_insert_flush(rq))
+		return;
+
+	if (plug && (q->nr_hw_queues == 1 ||
 			blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
 			q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
 		/*
...
@@ -236,7 +236,7 @@ void __blk_account_io_done(struct request *req, u64 now);
  */
 #define ELV_ON_HASH(rq)	((rq)->rq_flags & RQF_HASHED)
 
-void blk_insert_flush(struct request *rq);
+bool blk_insert_flush(struct request *rq);
 
 int elevator_switch_mq(struct request_queue *q,
 		struct elevator_type *new_e);
...