Commit e0e827b9 authored by Raghavendra K T, committed by Jens Axboe

blk-mq: Reuse hardware context cpumask for tags

hctx->cpumask is already populated, so let the tag cpumask follow it
instead of rebuilding it with a separate for loop.
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 6f3b0e8b
@@ -1850,6 +1850,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
+		cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
 
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
@@ -1863,14 +1864,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx->next_cpu = cpumask_first(hctx->cpumask);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
-
-	queue_for_each_ctx(q, ctx, i) {
-		if (!cpumask_test_cpu(i, online_mask))
-			continue;
-
-		hctx = q->mq_ops->map_queue(q, i);
-		cpumask_set_cpu(i, hctx->tags->cpumask);
-	}
 }
 
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
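A minimal userspace sketch of why the removed loop is redundant, assuming
glibc's cpu_set_t stands in for the kernel's struct cpumask; the names
hctx_mask/tags_old/tags_new are illustrative, not the kernel's. It shows
that a single copy of the already-populated hardware-context mask produces
the same tag mask as setting one bit per mapped CPU.

	#define _GNU_SOURCE
	#include <assert.h>
	#include <sched.h>
	#include <stdio.h>
	#include <string.h>

	#define NCPUS 8

	int main(void)
	{
		cpu_set_t hctx_mask, tags_old, tags_new;

		/* Pretend CPUs 1, 3 and 5 are mapped to this hardware context. */
		CPU_ZERO(&hctx_mask);
		CPU_SET(1, &hctx_mask);
		CPU_SET(3, &hctx_mask);
		CPU_SET(5, &hctx_mask);

		/* Removed approach: rebuild the tag mask one CPU at a time. */
		CPU_ZERO(&tags_old);
		for (int cpu = 0; cpu < NCPUS; cpu++)
			if (CPU_ISSET(cpu, &hctx_mask))
				CPU_SET(cpu, &tags_old);

		/* New approach: copy the populated mask in one operation,
		 * the userspace analogue of cpumask_copy(). */
		memcpy(&tags_new, &hctx_mask, sizeof(cpu_set_t));

		/* Both approaches yield the same tag cpumask. */
		assert(CPU_EQUAL(&tags_old, &tags_new));
		printf("tag masks match\n");
		return 0;
	}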