Commit 0da73d00 authored by Minwoo Im, committed by Jens Axboe

blk-mq: code clean-up by adding an API to clear set->mq_map

set->mq_map is currently cleared if something goes wrong when
establishing a queue map in blk-mq-pci.c.  It's also cleared before
updating a queue map in blk_mq_update_queue_map().

This patch adds a helper, blk_mq_clear_mq_map(), to clear set->mq_map
and make that intent explicit.
Signed-off-by: Minwoo Im <minwoo.im.dev@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5efac89c
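For context, here is a sketch of how an in-tree caller might use the new
helper (it lives in the private block/blk-mq.h header, so only block-layer
code can include it). example_map_queues() and example_hw_queue_for_cpu()
are hypothetical stand-ins for a real driver's topology logic; they are not
part of this commit.

/*
 * Illustrative sketch only. Assumes <linux/blk-mq.h> plus the private
 * "blk-mq.h" header, where blk_mq_clear_mq_map() is defined as of this
 * patch. example_hw_queue_for_cpu() is a hypothetical helper.
 */
static int example_map_queues(struct blk_mq_tag_set *set)
{
	int cpu, queue;

	for_each_possible_cpu(cpu) {
		/* Hypothetical topology lookup; a real driver would
		 * consult IRQ affinity or hardware layout here. */
		queue = example_hw_queue_for_cpu(cpu, set->nr_hw_queues);
		if (queue < 0)
			goto fallback;
		set->mq_map[cpu] = queue;
	}
	return 0;

fallback:
	/* Could not build a complete map: route every possible CPU to
	 * hw queue 0, as the blk-mq-pci.c fallback path below does. */
	blk_mq_clear_mq_map(set);
	return 0;
}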
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -17,6 +17,8 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 
+#include "blk-mq.h"
+
 /**
  * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
  * @set: tagset to provide the mapping for
@@ -48,8 +50,7 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
 
 fallback:
 	WARN_ON_ONCE(set->nr_hw_queues > 1);
-	for_each_possible_cpu(cpu)
-		set->mq_map[cpu] = 0;
+	blk_mq_clear_mq_map(set);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2683,7 +2683,6 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 {
 	if (set->ops->map_queues) {
-		int cpu;
 		/*
 		 * transport .map_queues is usually done in the following
 		 * way:
@@ -2698,8 +2697,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 		 * killing stale mapping since one CPU may not be mapped
 		 * to any hw queue.
 		 */
-		for_each_possible_cpu(cpu)
-			set->mq_map[cpu] = 0;
+		blk_mq_clear_mq_map(set);
 
 		return set->ops->map_queues(set);
 	} else
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -202,4 +202,12 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
 	__blk_mq_put_driver_tag(hctx, rq);
 }
 
+static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		set->mq_map[cpu] = 0;
+}
+
 #endif