Commit cdef54dd authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: remove alloc_hctx and free_hctx methods

There is no need for drivers to control hardware context allocation
now that we do the context to node mapping in common code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 75bb4625
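
In practical terms, a driver now registers only the callbacks the core still requires, queue_rq and map_queue (the blk_mq_alloc_tag_set() check below is reduced to exactly that); the hardware contexts themselves are allocated by common code on the node each queue maps to. A minimal, hedged registration sketch, where the my_* names and values are placeholders rather than anything introduced by this commit:

#include <linux/blk-mq.h>

/* Placeholder request handler; a real driver would process and complete rq. */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,		/* required */
	.map_queue	= blk_mq_map_queue,	/* required; generic helper */
	/* no .alloc_hctx / .free_hctx: those hooks no longer exist */
};

static struct blk_mq_tag_set my_tag_set = {
	.ops		= &my_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.numa_node	= NUMA_NO_NODE,
};

/* blk_mq_alloc_tag_set(&my_tag_set) now rejects the set only if nr_hw_queues,
 * queue_rq or map_queue is missing; no allocation hooks are validated. */
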
block/blk-mq.c
@@ -1335,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
 }
 EXPORT_SYMBOL(blk_mq_map_queue);
 
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
-		unsigned int hctx_index,
-		int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
-
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
-		unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
-
 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 		struct blk_mq_tags *tags, unsigned int hctx_idx)
 {
@@ -1590,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 	queue_for_each_hw_ctx(q, hctx, i) {
 		free_cpumask_var(hctx->cpumask);
-		set->ops->free_hctx(hctx, i);
+		kfree(hctx);
 	}
 }
@@ -1811,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int node = blk_mq_hw_queue_to_node(map, i);
 
-		hctxs[i] = set->ops->alloc_hctx(set, i, node);
+		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+					GFP_KERNEL, node);
 		if (!hctxs[i])
 			goto err_hctxs;
@@ -1898,7 +1884,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 		if (!hctxs[i])
 			break;
 		free_cpumask_var(hctxs[i]->cpumask);
-		set->ops->free_hctx(hctxs[i], i);
+		kfree(hctxs[i]);
 	}
 err_map:
 	kfree(hctxs);
@@ -1983,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues ||
-	    !set->ops->queue_rq || !set->ops->map_queue ||
-	    !set->ops->alloc_hctx || !set->ops->free_hctx)
+	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
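
The node passed to kzalloc_node() in the blk_mq_init_queue() hunk above comes from blk_mq_hw_queue_to_node(), part of the common mapping code this commit relies on: conceptually it returns the NUMA node of a CPU that the given hardware queue services. A rough sketch of that idea, with an example_ name to make clear it is illustrative rather than the kernel's exact implementation:

/*
 * Illustrative only: walk the cpu -> hw queue map and return the home
 * node of the first CPU serviced by hardware queue 'index'.
 */
static int example_hw_queue_to_node(unsigned int *map, unsigned int index)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		if (map[cpu] == index)
			return cpu_to_node(cpu);
	}

	return NUMA_NO_NODE;
}
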
drivers/block/null_blk.c
@@ -321,18 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
-		unsigned int hctx_index,
-		int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-
-static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
 {
 	BUG_ON(!nullb);
@@ -360,17 +348,6 @@ static struct blk_mq_ops null_mq_ops = {
 	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
-};
-
-static struct blk_mq_ops null_mq_ops_pernode = {
-	.queue_rq	= null_queue_rq,
-	.map_queue	= blk_mq_map_queue,
-	.init_hctx	= null_init_hctx,
-	.complete	= null_softirq_done_fn,
-	.alloc_hctx	= null_alloc_hctx,
-	.free_hctx	= null_free_hctx,
 };
 
 static void null_del_dev(struct nullb *nullb)
@@ -496,10 +473,7 @@ static int null_add_dev(void)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
-		if (use_per_node_hctx)
-			nullb->tag_set.ops = &null_mq_ops_pernode;
-		else
-			nullb->tag_set.ops = &null_mq_ops;
+		nullb->tag_set.ops = &null_mq_ops;
 		nullb->tag_set.nr_hw_queues = submit_queues;
 		nullb->tag_set.queue_depth = hw_queue_depth;
 		nullb->tag_set.numa_node = home_node;
drivers/block/virtio_blk.c
@@ -497,8 +497,6 @@ static int virtblk_init_request(void *data, struct request *rq,
 static struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.map_queue	= blk_mq_map_queue,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
 	.complete	= virtblk_request_done,
 	.init_request	= virtblk_init_request,
 };
include/linux/blk-mq.h
@@ -79,9 +79,6 @@ struct blk_mq_tag_set {
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
-typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
-		unsigned int, int);
-typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ -107,12 +104,6 @@ struct blk_mq_ops {
 	softirq_done_fn		*complete;
 
-	/*
-	 * Override for hctx allocations (should probably go)
-	 */
-	alloc_hctx_fn		*alloc_hctx;
-	free_hctx_fn		*free_hctx;
-
 	/*
 	 * Called when the block layer side of a hardware queue has been
 	 * set up, allowing the driver to allocate/init matching structures.
@@ -166,7 +157,6 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
 void blk_mq_end_io(struct request *rq, int error);
 void __blk_mq_end_io(struct request *rq, int error);
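
With ->alloc_hctx gone, a driver that used it to attach extra per-queue state can do the same work from the remaining ->init_hctx/->exit_hctx hooks, which run after the core has allocated the hardware context (see the comment retained in blk_mq_ops above). A hedged sketch using the typedefs kept in the header; the my_* names are placeholders, not part of this commit:

#include <linux/blk-mq.h>
#include <linux/slab.h>

/* Placeholder per-queue driver state. */
struct my_hctx_data {
	unsigned int index;
};

static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			unsigned int index)
{
	struct my_hctx_data *d;

	/* The core already allocated hctx; only driver state is set up here. */
	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->index = index;
	hctx->driver_data = d;
	return 0;
}

static void my_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
{
	kfree(hctx->driver_data);
}
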