Commit 4b6a5d9c authored by Jens Axboe

block: enable batched allocation for blk_mq_alloc_request()

The filesystem IO path can take advantage of allocating batches of
requests, if the underlying submitter tells the block layer about it
through the blk_plug. For passthrough IO, the exported API is the
blk_mq_alloc_request() helper, and that one does not allow for
request caching.

Wire up request caching for blk_mq_alloc_request(), which is generally
done without having a bio available upfront.
Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e73a625b
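
For context, here is a minimal sketch (not part of this commit) of how a passthrough submitter could opt in to the batching described above: it sizes the plug with blk_start_plug_nr_ios() before allocating, so the first blk_mq_alloc_request() call can fill plug->cached_rq and later calls pop from that cache. The helper name submit_batch() and the loop body are illustrative only.

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>
	#include <linux/err.h>

	/* Illustrative only: submit_batch() is a made-up helper. */
	static void submit_batch(struct request_queue *q, unsigned short nr_reqs)
	{
		struct blk_plug plug;
		unsigned short i;

		/* Tell the block layer how many requests to expect. */
		blk_start_plug_nr_ios(&plug, nr_reqs);
		for (i = 0; i < nr_reqs; i++) {
			struct request *rq;

			/*
			 * With this commit, the first allocation fills
			 * plug->cached_rq with nr_reqs requests; subsequent
			 * iterations are served from that cache.
			 */
			rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
			if (IS_ERR(rq))
				break;
			/* ... set up the passthrough command and dispatch rq ... */
			blk_mq_free_request(rq);	/* placeholder for real submission */
		}
		blk_finish_plug(&plug);	/* frees any unused cached requests */
	}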
@@ -510,25 +510,87 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 					alloc_time_ns);
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
-		blk_mq_req_flags_t flags)
+static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
+					    struct blk_plug *plug,
+					    blk_opf_t opf,
+					    blk_mq_req_flags_t flags)
 {
 	struct blk_mq_alloc_data data = {
 		.q		= q,
 		.flags		= flags,
 		.cmd_flags	= opf,
-		.nr_tags	= 1,
+		.nr_tags	= plug->nr_ios,
+		.cached_rq	= &plug->cached_rq,
 	};
 	struct request *rq;
-	int ret;
 
-	ret = blk_queue_enter(q, flags);
-	if (ret)
-		return ERR_PTR(ret);
+	if (blk_queue_enter(q, flags))
+		return NULL;
+
+	plug->nr_ios = 1;
 
 	rq = __blk_mq_alloc_requests(&data);
-	if (!rq)
-		goto out_queue_exit;
+	if (unlikely(!rq))
+		blk_queue_exit(q);
+	return rq;
+}
+
+static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
+		blk_opf_t opf,
+		blk_mq_req_flags_t flags)
+{
+	struct blk_plug *plug = current->plug;
+	struct request *rq;
+
+	if (!plug)
+		return NULL;
+	if (rq_list_empty(plug->cached_rq)) {
+		if (plug->nr_ios == 1)
+			return NULL;
+		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
+		if (rq)
+			goto got_it;
+		return NULL;
+	}
+	rq = rq_list_peek(&plug->cached_rq);
+	if (!rq || rq->q != q)
+		return NULL;
+
+	if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
+		return NULL;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
+		return NULL;
+
+	plug->cached_rq = rq_list_next(rq);
+got_it:
+	rq->cmd_flags = opf;
+	INIT_LIST_HEAD(&rq->queuelist);
+	return rq;
+}
+
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
+		blk_mq_req_flags_t flags)
+{
+	struct request *rq;
+
+	rq = blk_mq_alloc_cached_request(q, opf, flags);
+	if (!rq) {
+		struct blk_mq_alloc_data data = {
+			.q		= q,
+			.flags		= flags,
+			.cmd_flags	= opf,
+			.nr_tags	= 1,
+		};
+		int ret;
+
+		ret = blk_queue_enter(q, flags);
+		if (ret)
+			return ERR_PTR(ret);
+		rq = __blk_mq_alloc_requests(&data);
+		if (!rq)
+			goto out_queue_exit;
+	}
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
...
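
To summarize the fast path above: a plug-cached request is reused only if it matches the allocation being asked for. Below is a restated sketch of that check; cached_rq_usable() is a hypothetical name, and the logic simply mirrors blk_mq_alloc_cached_request() from the diff.

	/*
	 * Restated sketch of when a plug-cached request may be reused by
	 * blk_mq_alloc_request(). Not code from this commit.
	 */
	static bool cached_rq_usable(struct request *rq, struct request_queue *q,
				     blk_opf_t opf)
	{
		/* Must come from the same request queue. */
		if (!rq || rq->q != q)
			return false;
		/* Must map to the same hardware context type (e.g. poll vs default). */
		if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
			return false;
		/* Flush requests need extra setup, so flush-ness must match. */
		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
			return false;
		return true;
	}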