Commit d3484991 authored by Jens Axboe

blk-mq-sched: allow setting of default IO scheduler

Add Kconfig entries to manage what devices get assigned an MQ
scheduler, and add a blk-mq flag for drivers to opt out of scheduling.
The latter is useful for admin type queues that still allocate a blk-mq
queue and tag set, but aren't used for normal IO.
Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
parent 945ffb60
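For context, here is a minimal sketch of how a driver would use the new BLK_MQ_F_NO_SCHED flag to keep an admin-type queue away from the I/O scheduler. It mirrors the nvme_alloc_admin_tags() hunk below, but the driver, struct, ops table, and queue depth ("mydrv", 32) are hypothetical and not part of this commit.

#include <linux/blk-mq.h>

/* Hypothetical driver state; only the tag set matters for this example. */
struct mydrv_dev {
	struct blk_mq_tag_set admin_tagset;
};

static struct blk_mq_ops mydrv_admin_mq_ops;	/* .queue_rq etc. elided */

static int mydrv_alloc_admin_tags(struct mydrv_dev *dev)
{
	dev->admin_tagset.ops = &mydrv_admin_mq_ops;
	dev->admin_tagset.nr_hw_queues = 1;
	dev->admin_tagset.queue_depth = 32;
	dev->admin_tagset.numa_node = NUMA_NO_NODE;
	/* Opt out of I/O scheduling: this queue never carries normal IO. */
	dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
	dev->admin_tagset.driver_data = dev;

	return blk_mq_alloc_tag_set(&dev->admin_tagset);
}

With the flag set, blk_mq_init_allocated_queue() skips blk_mq_sched_init() for queues built on this tag set, as the blk-mq.c hunk below shows.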
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -32,12 +32,6 @@ config IOSCHED_CFQ
 
 	  This is the default I/O scheduler.
 
-config MQ_IOSCHED_DEADLINE
-	tristate "MQ deadline I/O scheduler"
-	default y
-	---help---
-	  MQ version of the deadline IO scheduler.
-
 config CFQ_GROUP_IOSCHED
 	bool "CFQ Group Scheduling support"
 	depends on IOSCHED_CFQ && BLK_CGROUP
@@ -69,6 +63,56 @@ config DEFAULT_IOSCHED
 	default "cfq" if DEFAULT_CFQ
 	default "noop" if DEFAULT_NOOP
 
+config MQ_IOSCHED_DEADLINE
+	tristate "MQ deadline I/O scheduler"
+	default y
+	---help---
+	  MQ version of the deadline IO scheduler.
+
+config MQ_IOSCHED_NONE
+	bool
+	default y
+
+choice
+	prompt "Default single-queue blk-mq I/O scheduler"
+	default DEFAULT_SQ_NONE
+	help
+	  Select the I/O scheduler which will be used by default for blk-mq
+	  managed block devices with a single queue.
+
+	config DEFAULT_SQ_DEADLINE
+		bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y
+
+	config DEFAULT_SQ_NONE
+		bool "None"
+
+endchoice
+
+config DEFAULT_SQ_IOSCHED
+	string
+	default "mq-deadline" if DEFAULT_SQ_DEADLINE
+	default "none" if DEFAULT_SQ_NONE
+
+choice
+	prompt "Default multi-queue blk-mq I/O scheduler"
+	default DEFAULT_MQ_NONE
+	help
+	  Select the I/O scheduler which will be used by default for blk-mq
+	  managed block devices with multiple queues.
+
+	config DEFAULT_MQ_DEADLINE
+		bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y
+
+	config DEFAULT_MQ_NONE
+		bool "None"
+
+endchoice
+
+config DEFAULT_MQ_IOSCHED
+	string
+	default "mq-deadline" if DEFAULT_MQ_DEADLINE
+	default "none" if DEFAULT_MQ_NONE
+
 endmenu
 
 endif
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -366,3 +366,23 @@ void blk_mq_sched_teardown(struct request_queue *q)
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_sched_free_tags(set, hctx, i);
 }
+
+int blk_mq_sched_init(struct request_queue *q)
+{
+	int ret;
+
+#if defined(CONFIG_DEFAULT_SQ_NONE)
+	if (q->nr_hw_queues == 1)
+		return 0;
+#endif
+#if defined(CONFIG_DEFAULT_MQ_NONE)
+	if (q->nr_hw_queues > 1)
+		return 0;
+#endif
+
+	mutex_lock(&q->sysfs_lock);
+	ret = elevator_init(q, NULL);
+	mutex_unlock(&q->sysfs_lock);
+
+	return ret;
+}
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -28,6 +28,8 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 int blk_mq_sched_setup(struct request_queue *q);
 void blk_mq_sched_teardown(struct request_queue *q);
 
+int blk_mq_sched_init(struct request_queue *q);
+
 static inline bool
 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2285,6 +2285,14 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	mutex_unlock(&all_q_mutex);
 	put_online_cpus();
 
+	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
+		int ret;
+
+		ret = blk_mq_sched_init(q);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
 	return q;
 
 err_hctxs:
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -219,7 +219,13 @@ int elevator_init(struct request_queue *q, char *name)
 	}
 
 	if (!e) {
-		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
+		if (q->mq_ops && q->nr_hw_queues == 1)
+			e = elevator_get(CONFIG_DEFAULT_SQ_IOSCHED, false);
+		else if (q->mq_ops)
+			e = elevator_get(CONFIG_DEFAULT_MQ_IOSCHED, false);
+		else
+			e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
+
 		if (!e) {
 			printk(KERN_ERR
 				"Default I/O scheduler not found. " \
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1181,6 +1181,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
 		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
 		dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
+		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
 		dev->admin_tagset.driver_data = dev;
 
 		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -153,6 +153,7 @@ enum {
 	BLK_MQ_F_SG_MERGE	= 1 << 2,
 	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
 	BLK_MQ_F_BLOCKING	= 1 << 5,
+	BLK_MQ_F_NO_SCHED	= 1 << 6,
 	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,