Commit 105663f7 authored by André Almeida, committed by Jens Axboe

blk-mq: Document functions for sending request

Add or improve documentation for functions involved in creating and sending
IO requests to the hardware.
Signed-off-by: André Almeida <andrealmeid@collabora.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a754bd5f
@@ -641,6 +641,14 @@ bool blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
/**
* blk_mq_start_request - Start processing a request
* @rq: Pointer to request to be started
*
* Function used by device drivers to notify the block layer that a request
* is going to be processed now, so the block layer can do proper initializations
* such as starting the timeout timer.
*/
void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
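
For context, a minimal sketch (not part of this patch) of how a blk-mq driver would typically call blk_mq_start_request() from its ->queue_rq() handler; my_queue_rq() and my_hw_submit() are hypothetical names:

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	/* Tell the block layer the request is now being processed;
	 * this is what arms the timeout timer mentioned above. */
	blk_mq_start_request(rq);

	if (my_hw_submit(rq))	/* hypothetical hardware submission helper */
		return BLK_STS_OK;

	/* Hardware could not take the request: let blk-mq retry it later. */
	return BLK_STS_RESOURCE;
}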
@@ -1327,6 +1335,12 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
return (queued + errors) != 0;
}
/**
* __blk_mq_run_hw_queue - Run a hardware queue.
* @hctx: Pointer to the hardware queue to run.
*
* Send pending requests to the hardware.
*/
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
int srcu_idx;
@@ -1424,6 +1438,15 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
return next_cpu;
}
/**
* __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
* @hctx: Pointer to the hardware queue to run.
* @async: If we want to run the queue asynchronously.
* @msecs: Milliseconds of delay to wait before running the queue.
*
* If !@async, try to run the queue now. Else, run the queue asynchronously and
* with a delay of @msecs.
*/
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
unsigned long msecs)
{
@@ -1445,12 +1468,28 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
msecs_to_jiffies(msecs));
}
/**
* blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
* @hctx: Pointer to the hardware queue to run.
* @msecs: Milliseconds of delay to wait before running the queue.
*
* Run a hardware queue asynchronously with a delay of @msecs.
*/
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
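
As an illustration only (not part of this patch), blk_mq_delay_run_hw_queue() is the usual way for a driver to back off when the device is temporarily out of resources instead of busy-looping; my_device_busy() is a hypothetical check, and my_hw_submit() is the same hypothetical helper as in the earlier sketch:

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	if (my_device_busy(hctx)) {	/* hypothetical out-of-resources check */
		/* Ask blk-mq to come back to this hardware queue in ~3 ms. */
		blk_mq_delay_run_hw_queue(hctx, 3);
		return BLK_STS_DEV_RESOURCE;
	}

	blk_mq_start_request(bd->rq);
	if (my_hw_submit(bd->rq))
		return BLK_STS_OK;
	return BLK_STS_RESOURCE;
}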
/**
* blk_mq_run_hw_queue - Start to run a hardware queue.
* @hctx: Pointer to the hardware queue to run.
* @async: If we want to run the queue asynchronously.
*
* Check if the request queue is not in a quiesced state and if there are
* pending requests to be sent. If this is true, run the queue to send requests
* to hardware.
*/
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
int srcu_idx;
@@ -1474,6 +1513,11 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);
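
A common pattern (illustrative assumption, not part of this patch) is to kick the hardware queue from the completion path, once the resources that previously blocked dispatch have been released:

static void my_complete_rq(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	blk_mq_end_request(rq, BLK_STS_OK);

	/* Per-command resources were just freed; run the hardware queue
	 * asynchronously (async == true) since this may be IRQ context. */
	blk_mq_run_hw_queue(hctx, true);
}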
/**
* blk_mq_run_hw_queues - Run all hardware queues in a request queue.
* @q: Pointer to the request queue to run.
* @async: If we want to run the queue asynchronously.
*/
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
struct blk_mq_hw_ctx *hctx;
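
For illustration (assumption, not part of this patch), this helper fits cases where an event affects every hardware queue of a device, e.g. a controller-wide resource becoming available again; struct my_controller and its queue member are hypothetical:

static void my_controller_resources_available(struct my_controller *ctrl)
{
	/* A resource shared by all hardware queues was freed, so give
	 * every hardware queue of the device's request queue a chance
	 * to dispatch its pending requests, asynchronously. */
	blk_mq_run_hw_queues(ctrl->queue, true);
}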
@@ -1625,7 +1669,11 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
blk_mq_hctx_mark_pending(hctx, ctx);
}
/**
* blk_mq_request_bypass_insert - Insert a request at dispatch list.
* @rq: Pointer to request to be inserted.
* @run_queue: If we should run the hardware queue after inserting the request.
*
* Should only be used carefully, when the caller knows we want to
* bypass a potential IO scheduler on the target device.
*/
@@ -1805,6 +1853,17 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
/**
* blk_mq_try_issue_directly - Try to send a request directly to device driver.
* @hctx: Pointer of the associated hardware queue.
* @rq: Pointer to request to be sent.
* @cookie: Request queue cookie.
*
* If the device has enough resources to accept a new request now, send the
* request directly to the device driver. Else, insert it at the hctx->dispatch
* queue, so we can try to send it again in the future. Requests inserted at
* this queue have higher priority.
*/
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, blk_qc_t *cookie)
{
@@ -1882,6 +1941,22 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
}
}
/**
* blk_mq_make_request - Create and send a request to block device.
* @q: Request queue pointer.
* @bio: Bio pointer.
*
* Builds up a request structure from @q and @bio and sends it to the device. The
* request may not be queued directly to hardware if:
* * This request can be merged with another one
* * We want to place request at plug queue for possible future merging
* * There is an IO scheduler active at this queue
*
* It will not queue the request if there is an error with the bio, or during
* request creation.
*
* Returns: Request queue cookie.
*/
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
@@ -1927,7 +2002,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
plug = blk_mq_plug(q, bio);
if (unlikely(is_flush_fua)) {
/* Bypass scheduler for flush requests */
blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true);
} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
@@ -1955,6 +2030,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_add_rq_to_plug(plug, rq);
} else if (q->elevator) {
/* Insert the request at the IO scheduler queue */
blk_mq_sched_insert_request(rq, false, true, true);
} else if (plug && !blk_queue_nomerges(q)) {
/*
@@ -1981,8 +2057,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
}
} else if ((q->nr_hw_queues > 1 && is_sync) ||
!data.hctx->dispatch_busy) {
/*
* There is no scheduler and we can try to send directly
* to the hardware.
*/
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
} else {
/* Default case. */
blk_mq_sched_insert_request(rq, false, true, true);
}
...
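
As a usage note (assumption, not part of this patch): drivers do not call blk_mq_make_request() directly; a bio submitted with submit_bio() is routed to it by the block layer through the queue's make_request_fn. A minimal sketch, where my_end_io is a hypothetical completion callback and bdev/page are set up by the caller:

static void my_read_first_sector(struct block_device *bdev, struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = 0;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = my_end_io;	/* hypothetical completion callback */

	/* The block layer routes this bio to the queue's make_request
	 * function, i.e. blk_mq_make_request() for blk-mq devices. */
	submit_bio(bio);
}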