Commit 360f2648 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: defer to the normal submission path for non-flush flush commands

If blk_insert_flush decides that a command does not need to use the
flush state machine, return false and let blk_mq_submit_bio handle
it the normal way (including using an I/O scheduler) instead of doing
a bypass insert.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230519044050.107790-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c1075e54
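The hunks below carry the mechanics of the change. As a rough illustration of the new contract, here is a small stand-alone, user-space C sketch (not kernel code; the REQ_FSEQ_* values and the insert_flush() helper are illustrative stand-ins) modelling the decision blk_insert_flush() now reports back to its caller:

/*
 * Stand-alone model of the boolean contract this commit introduces.
 * Flag names mirror the kernel's flush-sequence flags, but the code is
 * only a sketch of the decision, not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_FSEQ_PREFLUSH  (1 << 0)   /* a pre-flush step is needed */
#define REQ_FSEQ_DATA      (1 << 1)   /* the request carries data */
#define REQ_FSEQ_POSTFLUSH (1 << 2)   /* a post-flush step is needed */

/*
 * Returns true if the flush machinery consumes the request, false if the
 * caller (blk_mq_submit_bio in the kernel) should queue it normally.
 */
static bool insert_flush(unsigned int policy)
{
	switch (policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH)) {
	case 0:
		/* nothing to do: complete immediately, request is consumed */
		return true;
	case REQ_FSEQ_DATA:
		/*
		 * Data but no flush steps: let the normal submission path
		 * (including any I/O scheduler) handle it.
		 */
		return false;
	default:
		/* needs pre- and/or post-flush: enter the flush state machine */
		return true;
	}
}

int main(void)
{
	printf("data only   -> consumed: %d\n", insert_flush(REQ_FSEQ_DATA));
	printf("pre + data  -> consumed: %d\n",
	       insert_flush(REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA));
	printf("empty flush -> consumed: %d\n", insert_flush(0));
	return 0;
}

With the boolean return, the REQ_FSEQ_DATA-only case no longer needs a bypass insert plus an explicit queue run; blk_mq_submit_bio routes it through the plug and scheduler path like any other request.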
@@ -385,22 +385,17 @@ static void blk_rq_init_flush(struct request *rq)
 	rq->end_io = mq_flush_data_end_io;
 }
 
-/**
- * blk_insert_flush - insert a new PREFLUSH/FUA request
- * @rq: request to insert
- *
- * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
- * or __blk_mq_run_hw_queue() to dispatch request.
- * @rq is being submitted.  Analyze what needs to be done and put it on the
- * right queue.
+/*
+ * Insert a PREFLUSH/FUA request into the flush state machine.
+ * Returns true if the request has been consumed by the flush state machine,
+ * or false if the caller should continue to process it.
  */
-void blk_insert_flush(struct request *rq)
+bool blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned long fflags = q->queue_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	/* FLUSH/FUA request must never be merged */
 	WARN_ON_ONCE(rq->bio != rq->biotail);
@@ -429,16 +424,14 @@ void blk_insert_flush(struct request *rq)
 		 * complete the request.
 		 */
 		blk_mq_end_request(rq, 0);
-		return;
+		return true;
 	case REQ_FSEQ_DATA:
 		/*
 		 * If there's data, but no flush is necessary, the request can
 		 * be processed directly without going through flush machinery.
 		 * Queue for normal execution.
 		 */
-		blk_mq_request_bypass_insert(rq, 0);
-		blk_mq_run_hw_queue(hctx, false);
-		return;
+		return false;
 	default:
 		/*
 		 * Mark the request as part of a flush sequence and submit it
@@ -448,6 +441,7 @@ void blk_insert_flush(struct request *rq)
 		spin_lock_irq(&fq->mq_flush_lock);
 		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 		spin_unlock_irq(&fq->mq_flush_lock);
+		return true;
 	}
 }
......
@@ -45,6 +45,8 @@
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
+static void blk_mq_request_bypass_insert(struct request *rq,
+		blk_insert_t flags);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list);
@@ -2430,7 +2432,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
+static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
@@ -2977,10 +2979,8 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (op_is_flush(bio->bi_opf)) {
-		blk_insert_flush(rq);
+	if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
 		return;
-	}
 
 	if (plug) {
 		blk_add_rq_to_plug(plug, rq);
......
@@ -64,10 +64,6 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 			     struct blk_mq_tags *tags,
 			     unsigned int hctx_idx);
-
-/*
- * Internal helpers for request insertion into sw queues
- */
-void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);
 
 /*
  * CPU -> queue mappings
......
@@ -269,7 +269,7 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
  */
 #define ELV_ON_HASH(rq)	((rq)->rq_flags & RQF_HASHED)
 
-void blk_insert_flush(struct request *rq);
+bool blk_insert_flush(struct request *rq);
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
 void elevator_disable(struct request_queue *q);
......