Commit 24d2f903 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: split out tag initialization, support shared tags

Add a new blk_mq_tag_set structure that gets set up before we initialize
the queue.  A single blk_mq_tag_set structure can be shared by multiple
queues.
Signed-off-by: Christoph Hellwig <hch@lst.de>

Modular export of blk_mq_{alloc,free}_tagset added by me.
Signed-off-by: Jens Axboe <axboe@fb.com>
parent ed44832d
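
For reference, the driver-facing pattern this change introduces, distilled from the null_blk and virtio-blk conversions below, looks roughly like this (a minimal sketch; my_dev, my_ops, my_cmd and the placeholder error handling are illustrative names, not part of the patch):

	struct my_dev *dev;		/* hypothetical per-device structure embedding the set */
	struct request_queue *q;

	/* Describe queue count, depth and per-request payload once, up front. */
	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops = &my_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.cmd_size = sizeof(struct my_cmd);	/* extra per-request driver data */
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.driver_data = dev;

	/* Tags (and the requests behind them) are now allocated here ... */
	if (blk_mq_alloc_tag_set(&dev->tag_set))
		return -ENOMEM;

	/* ... so queue creation takes only the set, not a blk_mq_reg plus driver_data. */
	q = blk_mq_init_queue(&dev->tag_set);
	if (!q) {
		blk_mq_free_tag_set(&dev->tag_set);
		return -ENOMEM;
	}

Teardown mirrors it: blk_cleanup_queue() on the queue first, blk_mq_free_tag_set() last, as both converted drivers below do.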
block/blk-mq-cpumap.c
@@ -80,17 +80,17 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 	return 0;
 }
 
-unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg)
+unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
 {
 	unsigned int *map;
 
 	/* If cpus are offline, map them to first hctx */
 	map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
-				reg->numa_node);
+				set->numa_node);
 	if (!map)
 		return NULL;
 
-	if (!blk_mq_update_queue_map(map, reg->nr_hw_queues))
+	if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
 		return map;
 
 	kfree(map);
block/blk-mq-tag.c
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/percpu_ida.h>
 
 #include <linux/blk-mq.h>
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
-/*
- * Per tagged queue (tag address space) map
- */
-struct blk_mq_tags {
-	unsigned int nr_tags;
-	unsigned int nr_reserved_tags;
-	unsigned int nr_batch_move;
-	unsigned int nr_max_cache;
-
-	struct percpu_ida free_tags;
-	struct percpu_ida reserved_tags;
-};
-
 void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
 {
 	int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
block/blk-mq-tag.h
 #ifndef INT_BLK_MQ_TAG_H
 #define INT_BLK_MQ_TAG_H
 
-struct blk_mq_tags;
+#include <linux/percpu_ida.h>
+
+/*
+ * Tag address space map.
+ */
+struct blk_mq_tags {
+	unsigned int nr_tags;
+	unsigned int nr_reserved_tags;
+	unsigned int nr_batch_move;
+	unsigned int nr_max_cache;
+
+	struct percpu_ida free_tags;
+	struct percpu_ida reserved_tags;
+
+	struct request **rqs;
+	struct list_head page_list;
+};
 
 extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
block/blk-mq.c
This diff is collapsed.
block/blk-mq.h
 #ifndef INT_BLK_MQ_H
 #define INT_BLK_MQ_H
 
+struct blk_mq_tag_set;
+
 struct blk_mq_ctx {
 	struct {
 		spinlock_t		lock;
@@ -46,8 +48,7 @@ void blk_mq_disable_hotplug(void);
 /*
  * CPU -> queue mappings
  */
-struct blk_mq_reg;
-extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg);
+extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
 extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
 
 void blk_mq_add_timer(struct request *rq);
drivers/block/null_blk.c
@@ -32,6 +32,7 @@ struct nullb {
 	unsigned int index;
 	struct request_queue *q;
 	struct gendisk *disk;
+	struct blk_mq_tag_set tag_set;
 	struct hrtimer timer;
 	unsigned int queue_depth;
 	spinlock_t lock;
@@ -320,10 +321,11 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
+static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
+		unsigned int hctx_index)
 {
-	int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
-	int tip = (reg->nr_hw_queues % nr_online_nodes);
+	int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes);
+	int tip = (set->nr_hw_queues % nr_online_nodes);
 	int node = 0, i, n;
 
 	/*
@@ -338,7 +340,7 @@ static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned in
 			tip--;
 
 			if (!tip)
-				b_size = reg->nr_hw_queues / nr_online_nodes;
+				b_size = set->nr_hw_queues / nr_online_nodes;
 		}
 	}
 
@@ -387,13 +389,17 @@ static struct blk_mq_ops null_mq_ops = {
 	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
+	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
+	.free_hctx	= blk_mq_free_single_hw_queue,
 };
 
-static struct blk_mq_reg null_mq_reg = {
-	.ops		= &null_mq_ops,
-	.queue_depth	= 64,
-	.cmd_size	= sizeof(struct nullb_cmd),
-	.flags		= BLK_MQ_F_SHOULD_MERGE,
+static struct blk_mq_ops null_mq_ops_pernode = {
+	.queue_rq	= null_queue_rq,
+	.map_queue	= blk_mq_map_queue,
+	.init_hctx	= null_init_hctx,
+	.complete	= null_softirq_done_fn,
+	.alloc_hctx	= null_alloc_hctx,
+	.free_hctx	= null_free_hctx,
 };
 
 static void null_del_dev(struct nullb *nullb)
@@ -402,6 +408,8 @@ static void null_del_dev(struct nullb *nullb)
 
 	del_gendisk(nullb->disk);
 	blk_cleanup_queue(nullb->q);
+	if (queue_mode == NULL_Q_MQ)
+		blk_mq_free_tag_set(&nullb->tag_set);
 	put_disk(nullb->disk);
 	kfree(nullb);
 }
@@ -506,7 +514,7 @@ static int null_add_dev(void)
 
 	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
 	if (!nullb)
-		return -ENOMEM;
+		goto out;
 
 	spin_lock_init(&nullb->lock);
 
@@ -514,49 +522,47 @@ static int null_add_dev(void)
 		submit_queues = nr_online_nodes;
 
 	if (setup_queues(nullb))
-		goto err;
+		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
-		null_mq_reg.numa_node = home_node;
-		null_mq_reg.queue_depth = hw_queue_depth;
-		null_mq_reg.nr_hw_queues = submit_queues;
-
-		if (use_per_node_hctx) {
-			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
-			null_mq_reg.ops->free_hctx = null_free_hctx;
-		} else {
-			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
-			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
-		}
-
-		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
+		if (use_per_node_hctx)
+			nullb->tag_set.ops = &null_mq_ops_pernode;
+		else
+			nullb->tag_set.ops = &null_mq_ops;
+		nullb->tag_set.nr_hw_queues = submit_queues;
+		nullb->tag_set.queue_depth = hw_queue_depth;
+		nullb->tag_set.numa_node = home_node;
+		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
+		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+		nullb->tag_set.driver_data = nullb;
+
+		if (blk_mq_alloc_tag_set(&nullb->tag_set))
+			goto out_cleanup_queues;
+
+		nullb->q = blk_mq_init_queue(&nullb->tag_set);
+		if (!nullb->q)
+			goto out_cleanup_tags;
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
+		if (!nullb->q)
+			goto out_cleanup_queues;
 		blk_queue_make_request(nullb->q, null_queue_bio);
 		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
+		if (!nullb->q)
+			goto out_cleanup_queues;
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
-		if (nullb->q)
-			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
 		init_driver_queues(nullb);
 	}
 
-	if (!nullb->q)
-		goto queue_fail;
-
 	nullb->q->queuedata = nullb;
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
 
 	disk = nullb->disk = alloc_disk_node(1, home_node);
-	if (!disk) {
-queue_fail:
-		blk_cleanup_queue(nullb->q);
-		cleanup_queues(nullb);
-err:
-		kfree(nullb);
-		return -ENOMEM;
-	}
+	if (!disk)
+		goto out_cleanup_blk_queue;
 
 	mutex_lock(&lock);
 	list_add_tail(&nullb->list, &nullb_list);
@@ -579,6 +585,18 @@ static int null_add_dev(void)
 	sprintf(disk->disk_name, "nullb%d", nullb->index);
 	add_disk(disk);
 	return 0;
+
+out_cleanup_blk_queue:
+	blk_cleanup_queue(nullb->q);
+out_cleanup_tags:
+	if (queue_mode == NULL_Q_MQ)
+		blk_mq_free_tag_set(&nullb->tag_set);
+out_cleanup_queues:
+	cleanup_queues(nullb);
+out_free_nullb:
+	kfree(nullb);
+out:
+	return -ENOMEM;
 }
 
 static int __init null_init(void)
drivers/block/virtio_blk.c
@@ -30,6 +30,9 @@ struct virtio_blk
 
 	/* The disk structure for the kernel. */
 	struct gendisk *disk;
 
+	/* Block layer tags. */
+	struct blk_mq_tag_set tag_set;
+
 	/* Process context for config space updates */
 	struct work_struct config_work;
@@ -480,8 +483,9 @@ static const struct device_attribute dev_attr_cache_type_rw =
 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
 	       virtblk_cache_type_show, virtblk_cache_type_store);
 
-static int virtblk_init_request(void *data, struct blk_mq_hw_ctx *hctx,
-		struct request *rq, unsigned int nr)
+static int virtblk_init_request(void *data, struct request *rq,
+		unsigned int hctx_idx, unsigned int request_idx,
+		unsigned int numa_node)
 {
 	struct virtio_blk *vblk = data;
 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
@@ -495,18 +499,12 @@ static struct blk_mq_ops virtio_mq_ops = {
 	.map_queue	= blk_mq_map_queue,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
-	.init_request	= virtblk_init_request,
 	.complete	= virtblk_request_done,
+	.init_request	= virtblk_init_request,
 };
 
-static struct blk_mq_reg virtio_mq_reg = {
-	.ops		= &virtio_mq_ops,
-	.nr_hw_queues	= 1,
-	.queue_depth	= 0, /* Set in virtblk_probe */
-	.numa_node	= NUMA_NO_NODE,
-	.flags		= BLK_MQ_F_SHOULD_MERGE,
-};
-module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444);
+static unsigned int virtblk_queue_depth;
+module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
 
 static int virtblk_probe(struct virtio_device *vdev)
 {
@@ -562,20 +560,32 @@ static int virtblk_probe(struct virtio_device *vdev)
 	}
 
 	/* Default queue sizing is to fill the ring. */
-	if (!virtio_mq_reg.queue_depth) {
-		virtio_mq_reg.queue_depth = vblk->vq->num_free;
+	if (!virtblk_queue_depth) {
+		virtblk_queue_depth = vblk->vq->num_free;
 		/* ... but without indirect descs, we use 2 descs per req */
 		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
-			virtio_mq_reg.queue_depth /= 2;
+			virtblk_queue_depth /= 2;
 	}
 
-	virtio_mq_reg.cmd_size =
+	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
+	vblk->tag_set.ops = &virtio_mq_ops;
+	vblk->tag_set.nr_hw_queues = 1;
+	vblk->tag_set.queue_depth = virtblk_queue_depth;
+	vblk->tag_set.numa_node = NUMA_NO_NODE;
+	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	vblk->tag_set.cmd_size =
 		sizeof(struct virtblk_req) +
 		sizeof(struct scatterlist) * sg_elems;
+	vblk->tag_set.driver_data = vblk;
 
-	q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
+	err = blk_mq_alloc_tag_set(&vblk->tag_set);
+	if (err)
+		goto out_put_disk;
+
+	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
 	if (!q) {
 		err = -ENOMEM;
-		goto out_put_disk;
+		goto out_free_tags;
 	}
 
 	q->queuedata = vblk;
@@ -678,6 +688,8 @@ static int virtblk_probe(struct virtio_device *vdev)
 out_del_disk:
 	del_gendisk(vblk->disk);
 	blk_cleanup_queue(vblk->disk->queue);
+out_free_tags:
+	blk_mq_free_tag_set(&vblk->tag_set);
 out_put_disk:
 	put_disk(vblk->disk);
 out_free_vq:
@@ -704,6 +716,8 @@ static void virtblk_remove(struct virtio_device *vdev)
 	del_gendisk(vblk->disk);
 	blk_cleanup_queue(vblk->disk->queue);
 
+	blk_mq_free_tag_set(&vblk->tag_set);
+
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
include/linux/blk-mq.h
@@ -33,8 +33,6 @@ struct blk_mq_hw_ctx {
 	unsigned int		nr_ctx_map;
 	unsigned long		*ctx_map;
 
-	struct request		**rqs;
-	struct list_head	page_list;
 	struct blk_mq_tags	*tags;
 
 	unsigned long		queued;
@@ -42,7 +40,6 @@ struct blk_mq_hw_ctx {
 #define BLK_MQ_MAX_DISPATCH_ORDER	10
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
-	unsigned int		queue_depth;
 	unsigned int		numa_node;
 	unsigned int		cmd_size;	/* per-request extra data */
 
@@ -50,7 +47,7 @@ struct blk_mq_hw_ctx {
 	struct kobject		kobj;
 };
 
-struct blk_mq_reg {
+struct blk_mq_tag_set {
 	struct blk_mq_ops	*ops;
 	unsigned int		nr_hw_queues;
 	unsigned int		queue_depth;
@@ -59,18 +56,22 @@ struct blk_mq_reg {
 	int			numa_node;
 	unsigned int		timeout;
 	unsigned int		flags;		/* BLK_MQ_F_* */
+	void			*driver_data;
+
+	struct blk_mq_tags	**tags;
 };
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
-typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int);
+typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
+		unsigned int);
 typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(void *, struct blk_mq_hw_ctx *,
-		struct request *, unsigned int);
-typedef void (exit_request_fn)(void *, struct blk_mq_hw_ctx *,
-		struct request *, unsigned int);
+typedef int (init_request_fn)(void *, struct request *, unsigned int,
+		unsigned int, unsigned int);
+typedef void (exit_request_fn)(void *, struct request *, unsigned int,
+		unsigned int);
 
 struct blk_mq_ops {
 	/*
@@ -127,10 +128,13 @@ enum {
 	BLK_MQ_MAX_DEPTH	= 2048,
 };
 
-struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
+struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 
+int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
+
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
 void blk_mq_insert_request(struct request *, bool, bool, bool);
@@ -139,10 +143,10 @@ void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
-struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
+struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int);
 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
 bool blk_mq_end_io_partial(struct request *rq, int error,
@@ -173,12 +177,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 	return (void *) rq + sizeof(*rq);
 }
 
-static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
-					       unsigned int tag)
-{
-	return hctx->rqs[tag];
-}
-
 #define queue_for_each_hw_ctx(q, hctx, i)				\
 	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
 	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
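
Note that nothing in this commit actually shares a set yet: both converted drivers still create a single queue. The point of splitting blk_mq_alloc_tag_set() out of queue creation is that the tags in set->tags no longer belong to any one queue, so several request queues can be built from one set. A sketch of that intended usage, assumed from the commit message rather than taken from either conversion above (dev is the same hypothetical per-device structure as in the sketch at the top):

	/* One tag_set, filled in and allocated once, backing two queues. */
	struct request_queue *qa = blk_mq_init_queue(&dev->tag_set);
	struct request_queue *qb = blk_mq_init_queue(&dev->tag_set);

	/* ... */

	blk_cleanup_queue(qb);
	blk_cleanup_queue(qa);
	blk_mq_free_tag_set(&dev->tag_set);	/* free the shared tags only after the last queue is gone */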