Commit 63064be1 authored by John Garry, committed by Jens Axboe

blk-mq: Add blk_mq_alloc_map_and_rqs()

Add a function to combine allocating tags and the associated requests,
and factor out common patterns to use this new function.

Some functions only call blk_mq_alloc_map_and_rqs() now, but more
functionality will be added later.
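
In outline, the new helper chains the two existing allocation steps and
unwinds the tag map when request allocation fails, so callers get a single
NULL-or-valid result. Its shape, condensed from the blk-mq.c hunk below
(the comments are editorial, not in the patch):

	struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
						     unsigned int hctx_idx,
						     unsigned int depth)
	{
		struct blk_mq_tags *tags;
		int ret;

		/* step 1: the tag map itself */
		tags = blk_mq_alloc_rq_map(set, hctx_idx, depth,
					   set->reserved_tags, set->flags);
		if (!tags)
			return NULL;

		/* step 2: the requests backing it; undo step 1 on failure */
		ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
		if (ret) {
			blk_mq_free_rq_map(tags, set->flags);
			return NULL;
		}
		return tags;
	}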

Also make blk_mq_alloc_rq_map() and blk_mq_alloc_rqs() static since they
are only used in blk-mq.c, and finally rename some functions for
conciseness and consistency with other function names:
- __blk_mq_alloc_map_and_{request -> rqs}()
- blk_mq_alloc_{map_and_requests -> set_map_and_rqs}()
Suggested-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/1633429419-228500-11-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a7e7388d
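
The caller-side effect is easiest to see in blk_mq_tag_update_depth() in the
blk-mq-tag.c hunk: growing a tag map condenses to one allocation call plus
the existing free pair, roughly (condensed from that hunk, comment added):

	struct blk_mq_tags *new;

	new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
	if (!new)
		return -ENOMEM;

	/* release the old requests and map before switching over */
	blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
	blk_mq_free_rq_map(*tagsptr, set->flags);

after which the surrounding, unchanged code switches *tagsptr over to the
newly allocated map.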
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -519,21 +519,12 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
 					  struct blk_mq_hw_ctx *hctx,
 					  unsigned int hctx_idx)
 {
-	struct blk_mq_tag_set *set = q->tag_set;
-	int ret;
+	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
+						    q->nr_requests);
 
-	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
-					       set->reserved_tags, set->flags);
 	if (!hctx->sched_tags)
 		return -ENOMEM;
-
-	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
-	if (ret) {
-		blk_mq_free_rq_map(hctx->sched_tags, set->flags);
-		hctx->sched_tags = NULL;
-	}
-
-	return ret;
+	return 0;
 }
 
 /* called in queue's release handler, tagset has gone away */
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -592,7 +592,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 	if (tdepth > tags->nr_tags) {
 		struct blk_mq_tag_set *set = hctx->queue->tag_set;
 		struct blk_mq_tags *new;
-		bool ret;
 
 		if (!can_grow)
 			return -EINVAL;
@@ -604,15 +603,9 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 		if (tdepth > MAX_SCHED_RQ)
 			return -EINVAL;
 
-		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
-					  tags->nr_reserved_tags, set->flags);
+		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
 		if (!new)
 			return -ENOMEM;
-		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
-		if (ret) {
-			blk_mq_free_rq_map(new, set->flags);
-			return -ENOMEM;
-		}
 
 		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
 		blk_mq_free_rq_map(*tagsptr, set->flags);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2392,7 +2392,7 @@ void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
 	blk_mq_free_tags(tags, flags);
 }
 
-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 					unsigned int hctx_idx,
 					unsigned int nr_tags,
 					unsigned int reserved_tags,
@@ -2444,7 +2444,8 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	return 0;
 }
 
-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
+			    struct blk_mq_tags *tags,
 		     unsigned int hctx_idx, unsigned int depth)
 {
 	unsigned int i, j, entries_per_page, max_order = 4;
@@ -2856,25 +2857,34 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 	}
 }
 
-static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
-					   int hctx_idx)
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+					     unsigned int hctx_idx,
+					     unsigned int depth)
 {
-	unsigned int flags = set->flags;
-	int ret = 0;
+	struct blk_mq_tags *tags;
+	int ret;
 
-	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
-					set->queue_depth, set->reserved_tags, flags);
-	if (!set->tags[hctx_idx])
-		return false;
+	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags,
+				   set->flags);
+	if (!tags)
+		return NULL;
 
-	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
-					set->queue_depth);
-	if (!ret)
-		return true;
+	ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
+	if (ret) {
+		blk_mq_free_rq_map(tags, set->flags);
+		return NULL;
+	}
+
+	return tags;
+}
+
+static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+				       int hctx_idx)
+{
+	set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
+						       set->queue_depth);
 
-	blk_mq_free_rq_map(set->tags[hctx_idx], flags);
-	set->tags[hctx_idx] = NULL;
-	return false;
+	return set->tags[hctx_idx];
 }
 
 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
@@ -2919,7 +2929,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			hctx_idx = set->map[j].mq_map[i];
 			/* unmapped hw queue can be remapped after CPU topo changed */
 			if (!set->tags[hctx_idx] &&
-			    !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
+			    !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
 				/*
 				 * If tags initialization fail for some hctx,
 				 * that hctx won't be brought online. In this
@@ -3352,7 +3362,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	int i;
 
 	for (i = 0; i < set->nr_hw_queues; i++) {
-		if (!__blk_mq_alloc_map_and_request(set, i))
+		if (!__blk_mq_alloc_map_and_rqs(set, i))
 			goto out_unwind;
 		cond_resched();
 	}
@@ -3371,7 +3381,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
  * may reduce the depth asked for, if memory is tight. set->queue_depth
  * will be updated to reflect the allocated depth.
  */
-static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set)
+static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
 {
 	unsigned int depth;
 	int err;
@@ -3537,7 +3547,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (ret)
 		goto out_free_mq_map;
 
-	ret = blk_mq_alloc_map_and_requests(set);
+	ret = blk_mq_alloc_set_map_and_rqs(set);
 	if (ret)
 		goto out_free_mq_map;
 
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -55,12 +55,7 @@ void blk_mq_put_rq_ref(struct request *rq);
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		     unsigned int hctx_idx);
 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
-					unsigned int hctx_idx,
-					unsigned int nr_tags,
-					unsigned int reserved_tags,
-					unsigned int flags);
-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 		     unsigned int hctx_idx, unsigned int depth);
 
 /*