Commit 4412efec authored by Ming Lei, committed by Jens Axboe

Revert "blk-mq: remove code for dealing with remapping queue"

This reverts commit 37c7c6c7.

It turns out that some drivers (mostly FC drivers) may not use managed
IRQ affinity and have their own customized .map_queues, so keep this
code to avoid regressions.
Reported-by: Laurence Oberman <loberman@redhat.com>
Tested-by: Laurence Oberman <loberman@redhat.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Stefan Haberland <sth@linux.vnet.ibm.com>
Cc: Ewan Milne <emilne@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent fe644072
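For context on the failure mode the commit message describes: .map_queues lets a driver install its own CPU-to-hardware-queue mapping instead of the managed-IRQ-affinity default. Below is a minimal sketch of what such a customized callback might look like against the blk_mq_tag_set layout of this kernel era (set->mq_map indexed by CPU); foo_map_queues and its round-robin policy are hypothetical, not taken from any real driver.

#include <linux/blk-mq.h>

/*
 * Hypothetical customized .map_queues: spread all possible CPUs
 * round-robin across the hardware queues, ignoring managed IRQ
 * affinity. Mappings decoupled from IRQ affinity like this are why
 * blk-mq must cope with hctxs gaining or losing all of their ctxs
 * when the CPU topology changes -- the cases the restored code
 * below handles.
 */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		set->mq_map[cpu] = cpu % set->nr_hw_queues;

	return 0;
}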
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2336,7 +2336,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-	unsigned int i;
+	unsigned int i, hctx_idx;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -2353,8 +2353,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	/*
 	 * Map software to hardware queues.
+	 *
+	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
+		hctx_idx = q->mq_map[i];
+		/* unmapped hw queue can be remapped after CPU topo changed */
+		if (!set->tags[hctx_idx] &&
+		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
+			/*
+			 * If tags initialization fail for some hctx,
+			 * that hctx won't be brought online.  In this
+			 * case, remap the current ctx to hctx[0] which
+			 * is guaranteed to always have tags allocated
+			 */
+			q->mq_map[i] = 0;
+		}
+
 		ctx = per_cpu_ptr(q->queue_ctx, i);
 		hctx = blk_mq_map_queue(q, i);
 
@@ -2366,8 +2381,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	mutex_unlock(&q->sysfs_lock);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		/* every hctx should get mapped by at least one CPU */
-		WARN_ON(!hctx->nr_ctx);
+		/*
+		 * If no software queues are mapped to this hardware queue,
+		 * disable it and free the request entries.
+		 */
+		if (!hctx->nr_ctx) {
+			/* Never unmap queue 0.  We need it as a
+			 * fallback in case of a new remap fails
+			 * allocation
+			 */
+			if (i && set->tags[i])
+				blk_mq_free_map_and_requests(set, i);
+
+			hctx->tags = NULL;
+			continue;
+		}
+
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
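The interplay of the two restored hunks is easier to see outside the kernel. Below is a throwaway userspace model (plain C with illustrative names only, not kernel APIs): pass 1 redirects any software queue whose hctx has no tags, pass 2 tears down hctxs that ended up empty while sparing hctx 0.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8
#define NR_HCTX 4

/*
 * Toy model of the restored remapping logic: mq_map[cpu] names the
 * hctx each software queue feeds, tags[hctx] says whether that hctx
 * has a request pool. Here hctx 2 has lost its tags.
 */
static unsigned int mq_map[NR_CPUS] = { 0, 1, 2, 3, 0, 1, 2, 3 };
static bool tags[NR_HCTX] = { true, true, false, true };
static unsigned int nr_ctx[NR_HCTX];

/* Stand-in for __blk_mq_alloc_rq_map(): pretend allocation fails. */
static bool alloc_rq_map(unsigned int hctx)
{
	(void)hctx;
	return false;
}

int main(void)
{
	unsigned int cpu, i;

	/*
	 * Pass 1: remap any ctx whose hctx has no tags (and cannot get
	 * them) to hctx 0, which always has tags allocated.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned int hctx = mq_map[cpu];

		if (!tags[hctx] && !alloc_rq_map(hctx))
			mq_map[cpu] = 0;
		nr_ctx[mq_map[cpu]]++;
	}

	/*
	 * Pass 2: release the request pool of any hctx that ended up
	 * with no ctx mapped -- but never hctx 0, the fallback.
	 */
	for (i = 0; i < NR_HCTX; i++) {
		if (!nr_ctx[i] && i && tags[i])
			tags[i] = false;
		printf("hctx %u: nr_ctx=%u tags=%d\n", i, nr_ctx[i], tags[i]);
	}
	return 0;
}

Running this prints hctx 2 with nr_ctx=0 and no tags while hctx 0 absorbs its CPUs, which is exactly why queue 0 is exempt from freeing: the pass-1 fallback must always have a live request pool to land on.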