Commit ecb0a83e authored by Christoph Hellwig, committed by Jens Axboe

ubd: remove use of blk_rq_map_sg

There is no good reason to create a scatterlist in the ubd driver;
it can just iterate the request directly.
Signed-off-by: Christoph Hellwig <hch@lst.de>
[rw: Folded in improvements as discussed with hch and jens]
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6956b956
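
For readers less familiar with the pattern this commit switches to, below is a minimal illustrative sketch (not part of the commit) of iterating a request's bio_vec segments directly with rq_for_each_segment() instead of first flattening them into a scatterlist with blk_rq_map_sg(). The submit_one_vec() helper is hypothetical, standing in for the driver's real per-segment submission (ubd_queue_one_vec() in this patch).

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical per-segment submit; ubd_queue_one_vec() plays this role here. */
static int submit_one_vec(struct request *req, u64 off, struct bio_vec *bvec);

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct req_iterator iter;
	struct bio_vec bvec;
	/* blk_rq_pos() counts 512-byte sectors; convert to a byte offset. */
	u64 off = (u64)blk_rq_pos(req) << 9;

	blk_mq_start_request(req);

	/* Visit each (page, offset, length) segment of the request in order. */
	rq_for_each_segment(bvec, req, iter) {
		if (submit_one_vec(req, off, &bvec) < 0) {
			blk_mq_requeue_request(req, true);
			break;
		}
		off += bvec.bv_len;
	}
	return BLK_STS_OK;
}

Each bio_vec already carries the page, offset, and length that the I/O thread needs, so no per-request scatterlist (and hence no cmd_size PDU allocation) is required, which is exactly what the patch below removes.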
@@ -160,12 +160,6 @@ struct ubd {
 	spinlock_t lock;
 };
 
-struct ubd_pdu {
-	struct scatterlist sg[MAX_SG];
-	int start_sg, end_sg;
-	sector_t rq_pos;
-};
-
 #define DEFAULT_COW { \
 	.file = NULL, \
 	.fd = -1, \
@@ -197,9 +191,6 @@ static struct proc_dir_entry *proc_ide = NULL;
 
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 				 const struct blk_mq_queue_data *bd);
-static int ubd_init_request(struct blk_mq_tag_set *set,
-			    struct request *req, unsigned int hctx_idx,
-			    unsigned int numa_node);
 
 static void make_proc_ide(void)
 {
@@ -895,7 +886,6 @@ static int ubd_disk_register(int major, u64 size, int unit,
 
 static const struct blk_mq_ops ubd_mq_ops = {
 	.queue_rq = ubd_queue_rq,
-	.init_request = ubd_init_request,
 };
 
 static int ubd_add(int n, char **error_out)
@@ -918,7 +908,6 @@ static int ubd_add(int n, char **error_out)
 	ubd_dev->tag_set.queue_depth = 64;
 	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
 	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ubd_dev->tag_set.cmd_size = sizeof(struct ubd_pdu);
 	ubd_dev->tag_set.driver_data = ubd_dev;
 	ubd_dev->tag_set.nr_hw_queues = 1;
 
@@ -1300,123 +1289,84 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
 		   req->bitmap_words, bitmap_len);
 }
 
-/* Called with dev->lock held */
-static void prepare_request(struct request *req, struct io_thread_req *io_req,
-			    unsigned long long offset, int page_offset,
-			    int len, struct page *page)
+static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
+		u64 off, struct bio_vec *bvec)
 {
-	struct gendisk *disk = req->rq_disk;
-	struct ubd *ubd_dev = disk->private_data;
+	struct ubd *dev = hctx->queue->queuedata;
+	struct io_thread_req *io_req;
+	int ret;
+
+	io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
+	if (!io_req)
+		return -ENOMEM;
 
 	io_req->req = req;
-	io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
-		ubd_dev->fd;
-	io_req->fds[1] = ubd_dev->fd;
-	io_req->cow_offset = -1;
-	io_req->offset = offset;
-	io_req->length = len;
-	io_req->error = 0;
-	io_req->sector_mask = 0;
-
-	io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
-	io_req->offsets[0] = 0;
-	io_req->offsets[1] = ubd_dev->cow.data_offset;
-	io_req->buffer = page_address(page) + page_offset;
-	io_req->sectorsize = 1 << 9;
-
-	if(ubd_dev->cow.file != NULL)
-		cowify_req(io_req, ubd_dev->cow.bitmap,
-			   ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len);
-
-}
+	if (dev->cow.file)
+		io_req->fds[0] = dev->cow.fd;
+	else
+		io_req->fds[0] = dev->fd;
 
-/* Called with dev->lock held */
-static void prepare_flush_request(struct request *req,
-				  struct io_thread_req *io_req)
-{
-	struct gendisk *disk = req->rq_disk;
-	struct ubd *ubd_dev = disk->private_data;
+	if (req_op(req) == REQ_OP_FLUSH) {
+		io_req->op = UBD_FLUSH;
+	} else {
+		io_req->fds[1] = dev->fd;
+		io_req->cow_offset = -1;
+		io_req->offset = off;
+		io_req->length = bvec->bv_len;
+		io_req->error = 0;
+		io_req->sector_mask = 0;
 
-	io_req->req = req;
-	io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
-		ubd_dev->fd;
-	io_req->op = UBD_FLUSH;
-}
+		io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
+		io_req->offsets[0] = 0;
+		io_req->offsets[1] = dev->cow.data_offset;
+		io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
+		io_req->sectorsize = 1 << 9;
+
+		if (dev->cow.file) {
+			cowify_req(io_req, dev->cow.bitmap,
+				   dev->cow.bitmap_offset, dev->cow.bitmap_len);
+		}
+	}
 
-static void submit_request(struct io_thread_req *io_req, struct ubd *dev)
-{
-	int n = os_write_file(thread_fd, &io_req,
-			     sizeof(io_req));
-	if (n != sizeof(io_req)) {
-		if (n != -EAGAIN)
-			pr_err("write to io thread failed: %d\n", -n);
-		blk_mq_requeue_request(io_req->req, true);
+	ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
+	if (ret != sizeof(io_req)) {
+		if (ret != -EAGAIN)
+			pr_err("write to io thread failed: %d\n", -ret);
 		kfree(io_req);
 	}
+	return ret;
 }
 
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 				 const struct blk_mq_queue_data *bd)
 {
 	struct request *req = bd->rq;
-	struct ubd *dev = hctx->queue->queuedata;
-	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
-	struct io_thread_req *io_req;
+	int ret = 0;
 
 	blk_mq_start_request(req);
 
-	pdu->rq_pos = blk_rq_pos(req);
-	pdu->start_sg = 0;
-	pdu->end_sg = blk_rq_map_sg(req->q, req, pdu->sg);
-
 	if (req_op(req) == REQ_OP_FLUSH) {
-		io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
-		if (io_req == NULL) {
-			blk_mq_requeue_request(req, true);
-			goto done;
-		}
-		prepare_flush_request(req, io_req);
-		submit_request(io_req, dev);
-
-		goto done;
-	}
-
-	while (pdu->start_sg < pdu->end_sg) {
-		struct scatterlist *sg = &pdu->sg[pdu->start_sg];
-
-		io_req = kmalloc(sizeof(struct io_thread_req),
-				 GFP_ATOMIC);
-		if (io_req == NULL) {
-			blk_mq_requeue_request(req, true);
-			goto done;
+		ret = ubd_queue_one_vec(hctx, req, 0, NULL);
+	} else {
+		struct req_iterator iter;
+		struct bio_vec bvec;
+		u64 off = (u64)blk_rq_pos(req) << 9;
+
+		rq_for_each_segment(bvec, req, iter) {
+			ret = ubd_queue_one_vec(hctx, req, off, &bvec);
+			if (ret < 0)
+				goto out;
+			off += bvec.bv_len;
 		}
-		prepare_request(req, io_req,
-				(unsigned long long)pdu->rq_pos << 9,
-				sg->offset, sg->length, sg_page(sg));
-
-		submit_request(io_req, dev);
-
-		pdu->rq_pos += sg->length >> 9;
-		pdu->start_sg++;
 	}
-
-done:
+out:
+	if (ret < 0) {
+		blk_mq_requeue_request(req, true);
+	}
 	return BLK_STS_OK;
 }
 
-static int ubd_init_request(struct blk_mq_tag_set *set,
-		struct request *req, unsigned int hctx_idx,
-		unsigned int numa_node)
-{
-	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
-
-	sg_init_table(pdu->sg, MAX_SG);
-
-	return 0;
-}
-
 static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
 	struct ubd *ubd_dev = bdev->bd_disk->private_data;