Commit 6917ff0b authored by Omar Sandoval, committed by Jens Axboe

blk-mq-sched: refactor scheduler initialization

Preparation cleanup for the next couple of fixes: push
blk_mq_sched_setup() and e->ops.mq.init_sched() into a helper.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 81380ca1
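To see the shape of the refactor in isolation, the following stand-alone C sketch models the pattern the diff introduces: one entry point allocates per-hctx state, runs the scheduler's init hook, and unwinds any partial allocations through a single error label, so callers no longer have to pair a setup call with a teardown on failure. The stub names (queue_stub, init_sched_stub, and so on) are illustrative stand-ins, not the kernel's types; the authoritative code is in the hunks below.

#include <stdio.h>
#include <stdlib.h>

struct hctx_stub {
	int *sched_tags;		/* stands in for hctx->sched_tags */
};

struct queue_stub {
	struct hctx_stub hctx[4];
	int nr_hw_queues;
};

/* Per-hctx allocation, in the role of blk_mq_sched_alloc_tags(). */
static int alloc_tags_stub(struct queue_stub *q, int i)
{
	q->hctx[i].sched_tags = malloc(64 * sizeof(int));
	return q->hctx[i].sched_tags ? 0 : -1;
}

/*
 * Free whatever was allocated, in the role of blk_mq_sched_teardown().
 * Safe on partial state: free(NULL) is a no-op, so hctxs that never got
 * tags are skipped naturally.
 */
static void teardown_stub(struct queue_stub *q)
{
	for (int i = 0; i < q->nr_hw_queues; i++) {
		free(q->hctx[i].sched_tags);
		q->hctx[i].sched_tags = NULL;
	}
}

/*
 * Single entry point, in the role of blk_mq_init_sched(): allocate
 * per-hctx state, then run the scheduler's init hook; any failure takes
 * the one goto-style unwind path.
 */
static int init_sched_stub(struct queue_stub *q,
			   int (*init)(struct queue_stub *))
{
	int ret;

	for (int i = 0; i < q->nr_hw_queues; i++) {
		ret = alloc_tags_stub(q, i);
		if (ret)
			goto err;
	}

	ret = init(q);
	if (ret)
		goto err;

	return 0;
err:
	teardown_stub(q);
	return ret;
}

/* A trivial init hook; return -1 here to exercise the unwind path. */
static int fake_init_hook(struct queue_stub *q)
{
	(void)q;
	return 0;
}

int main(void)
{
	struct queue_stub q = { .nr_hw_queues = 4 };

	if (init_sched_stub(&q, fake_init_hook))
		return 1;
	puts("scheduler state initialized");
	teardown_stub(&q);
	return 0;
}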
block/blk-mq-sched.c
@@ -432,11 +432,45 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
 	}
 }
 
-int blk_mq_sched_setup(struct request_queue *q)
+static int blk_mq_sched_alloc_tags(struct request_queue *q,
+				   struct blk_mq_hw_ctx *hctx,
+				   unsigned int hctx_idx)
+{
+	struct blk_mq_tag_set *set = q->tag_set;
+	int ret;
+
+	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
+					       set->reserved_tags);
+	if (!hctx->sched_tags)
+		return -ENOMEM;
+
+	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
+	if (ret)
+		blk_mq_sched_free_tags(set, hctx, hctx_idx);
+
+	return ret;
+}
+
+void blk_mq_sched_teardown(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 	struct blk_mq_hw_ctx *hctx;
-	int ret, i;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_sched_free_tags(set, hctx, i);
+}
+
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+	int ret;
+
+	if (!e) {
+		q->elevator = NULL;
+		return 0;
+	}
 
 	/*
 	 * Default to 256, since we don't split into sync/async like the
@@ -444,49 +478,21 @@ int blk_mq_sched_setup(struct request_queue *q)
 	 */
 	q->nr_requests = 2 * BLKDEV_MAX_RQ;
 
-	/*
-	 * We're switching to using an IO scheduler, so setup the hctx
-	 * scheduler tags and switch the request map from the regular
-	 * tags to scheduler tags. First allocate what we need, so we
-	 * can safely fail and fallback, if needed.
-	 */
-	ret = 0;
 	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
-				q->nr_requests, set->reserved_tags);
-		if (!hctx->sched_tags) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
+		ret = blk_mq_sched_alloc_tags(q, hctx, i);
 		if (ret)
-			break;
+			goto err;
 	}
 
-	/*
-	 * If we failed, free what we did allocate
-	 */
-	if (ret) {
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (!hctx->sched_tags)
-				continue;
-			blk_mq_sched_free_tags(set, hctx, i);
-		}
-
-		return ret;
-	}
+	ret = e->ops.mq.init_sched(q, e);
+	if (ret)
+		goto err;
 
 	return 0;
-}
 
-void blk_mq_sched_teardown(struct request_queue *q)
-{
-	struct blk_mq_tag_set *set = q->tag_set;
-	struct blk_mq_hw_ctx *hctx;
-	int i;
-
-	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_sched_free_tags(set, hctx, i);
+err:
+	blk_mq_sched_teardown(q);
+	return ret;
 }
 
 int blk_mq_sched_init(struct request_queue *q)
block/blk-mq-sched.h
@@ -32,7 +32,7 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 			struct list_head *rq_list,
 			struct request *(*get_rq)(struct blk_mq_hw_ctx *));
 
-int blk_mq_sched_setup(struct request_queue *q);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
 void blk_mq_sched_teardown(struct request_queue *q);
 
 int blk_mq_sched_init(struct request_queue *q);
block/elevator.c
@@ -242,17 +242,12 @@ int elevator_init(struct request_queue *q, char *name)
 		}
 	}
 
-	if (e->uses_mq) {
-		err = blk_mq_sched_setup(q);
-		if (!err)
-			err = e->ops.mq.init_sched(q, e);
-	} else
+	if (e->uses_mq)
+		err = blk_mq_init_sched(q, e);
+	else
 		err = e->ops.sq.elevator_init_fn(q, e);
-	if (err) {
-		if (e->uses_mq)
-			blk_mq_sched_teardown(q);
+	if (err)
 		elevator_put(e);
-	}
 	return err;
 }
 EXPORT_SYMBOL(elevator_init);
@@ -987,21 +982,18 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	}
 
 	/* allocate, init and register new elevator */
-	if (new_e) {
-		if (new_e->uses_mq) {
-			err = blk_mq_sched_setup(q);
-			if (!err)
-				err = new_e->ops.mq.init_sched(q, new_e);
-		} else
-			err = new_e->ops.sq.elevator_init_fn(q, new_e);
-		if (err)
-			goto fail_init;
+	if (q->mq_ops)
+		err = blk_mq_init_sched(q, new_e);
+	else
+		err = new_e->ops.sq.elevator_init_fn(q, new_e);
+	if (err)
+		goto fail_init;
 
+	if (new_e) {
 		err = elv_register_queue(q);
 		if (err)
 			goto fail_register;
-	} else
-		q->elevator = NULL;
+	}
 
 	/* done, kill the old one and finish */
 	if (old) {
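A subtlety in the elevator_switch() hunk above: the branch changes from new_e->uses_mq to q->mq_ops because new_e may now legitimately be NULL (switching to "none") and so cannot be dereferenced to pick the path; blk_mq_init_sched() itself records the no-scheduler case via q->elevator = NULL, which is why the old "else q->elevator = NULL" arm disappears. The sketch below, again with illustrative stub names rather than kernel types, exercises both cases.

#include <assert.h>
#include <stddef.h>

struct elv_stub { const char *name; };

struct q_stub {
	int mq_ops;			/* non-zero: blk-mq queue */
	struct elv_stub *elevator;
};

/* In the role of blk_mq_init_sched(): e == NULL now means "no scheduler". */
static int mq_init_sched_stub(struct q_stub *q, struct elv_stub *e)
{
	if (!e) {
		q->elevator = NULL;	/* "none": record it and succeed */
		return 0;
	}
	q->elevator = e;
	return 0;
}

/*
 * Caller shape after the refactor: branch on the queue type (q->mq_ops),
 * never on new_e->uses_mq, since new_e may be NULL. The helper owns the
 * q->elevator = NULL bookkeeping.
 */
static int switch_stub(struct q_stub *q, struct elv_stub *new_e)
{
	if (q->mq_ops)
		return mq_init_sched_stub(q, new_e);
	return -1;	/* legacy single-queue path elided in this sketch */
}

int main(void)
{
	struct q_stub q = { .mq_ops = 1, .elevator = NULL };
	struct elv_stub mq_deadline = { "mq-deadline" };

	assert(switch_stub(&q, &mq_deadline) == 0 && q.elevator == &mq_deadline);
	assert(switch_stub(&q, NULL) == 0 && q.elevator == NULL);
	return 0;
}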