Commit 735d77d4 authored by Guoqing Jiang's avatar Guoqing Jiang Committed by Jens Axboe

rnbd: remove rnbd_dev_submit_io

The function only has one caller, so let's open code it in process_rdma.
Another bonus is that we avoid pushing/popping the stack, since we no
longer need to pass 8 arguments to rnbd_dev_submit_io.
Signed-off-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Acked-by: Danil Kipnis <danil.kipnis@cloud.ionos.com>
Acked-by: Jack Wang <jinpu.wang@cloud.ionos.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b35fd742
...@@ -45,7 +45,7 @@ void rnbd_dev_close(struct rnbd_dev *dev) ...@@ -45,7 +45,7 @@ void rnbd_dev_close(struct rnbd_dev *dev)
kfree(dev); kfree(dev);
} }
static void rnbd_dev_bi_end_io(struct bio *bio) void rnbd_dev_bi_end_io(struct bio *bio)
{ {
struct rnbd_dev_blk_io *io = bio->bi_private; struct rnbd_dev_blk_io *io = bio->bi_private;
...@@ -63,8 +63,8 @@ static void rnbd_dev_bi_end_io(struct bio *bio) ...@@ -63,8 +63,8 @@ static void rnbd_dev_bi_end_io(struct bio *bio)
* Map the kernel address into a bio suitable for io to a block * Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error. * device. Returns an error pointer in case of error.
*/ */
static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs, struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
unsigned int len, gfp_t gfp_mask) unsigned int len, gfp_t gfp_mask)
{ {
unsigned long kaddr = (unsigned long)data; unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
...@@ -102,33 +102,3 @@ static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs, ...@@ -102,33 +102,3 @@ static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
bio->bi_end_io = bio_put; bio->bi_end_io = bio_put;
return bio; return bio;
} }
/*
 * rnbd_dev_submit_io() - map an RDMA buffer into a bio and submit it
 * @dev:	device the I/O is directed at
 * @sector:	starting sector of the I/O
 * @data:	kernel buffer backing the I/O
 * @len:	length of @data in bytes
 * @bi_size:	byte count recorded in the bio's iterator
 * @flags:	rnbd I/O flags, translated into bio op flags
 * @prio:	I/O priority assigned to the bio
 * @priv:	caller context saved for the completion handler
 *
 * Returns 0 once the bio has been submitted, or a negative errno if
 * the buffer could not be mapped into a bio.
 */
int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
		       size_t len, u32 bi_size, enum rnbd_io_flags flags,
		       short prio, void *priv)
{
	struct rnbd_dev_blk_io *blk_io;
	struct bio *bio;

	/* Build a bio whose pages reference the RDMA buffer directly. */
	bio = rnbd_bio_map_kern(data, dev->ibd_bio_set, len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* The per-I/O struct embeds the bio; recover it via container_of. */
	blk_io = container_of(bio, struct rnbd_dev_blk_io, bio);
	blk_io->dev = dev;
	blk_io->priv = priv;

	bio_set_dev(bio, dev->bdev);
	bio_set_prio(bio, prio);
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = bi_size;
	bio->bi_opf = rnbd_to_bio_flags(flags);
	bio->bi_private = blk_io;
	bio->bi_end_io = rnbd_dev_bi_end_io;

	submit_bio(bio);

	return 0;
}
...@@ -41,6 +41,11 @@ void rnbd_dev_close(struct rnbd_dev *dev); ...@@ -41,6 +41,11 @@ void rnbd_dev_close(struct rnbd_dev *dev);
void rnbd_endio(void *priv, int error); void rnbd_endio(void *priv, int error);
void rnbd_dev_bi_end_io(struct bio *bio);
struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
unsigned int len, gfp_t gfp_mask);
static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev) static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
{ {
return queue_max_segments(bdev_get_queue(dev->bdev)); return queue_max_segments(bdev_get_queue(dev->bdev));
...@@ -75,18 +80,4 @@ static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev) ...@@ -75,18 +80,4 @@ static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
return bdev_get_queue(dev->bdev)->limits.discard_alignment; return bdev_get_queue(dev->bdev)->limits.discard_alignment;
} }
/**
 * rnbd_dev_submit_io() - Submit an I/O to the disk
 * @dev:	device to which the I/O is submitted
 * @sector:	address to read/write data to
 * @data:	I/O data to write or buffer to read I/O data into
 * @len:	length of @data
 * @bi_size:	Amount of data that will be read/written
 * @flags:	I/O flags applied to the request
 * @prio:	IO priority
 * @priv:	private data passed to the completion callback
 */
int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
size_t len, u32 bi_size, enum rnbd_io_flags flags,
short prio, void *priv);
#endif /* RNBD_SRV_DEV_H */ #endif /* RNBD_SRV_DEV_H */
...@@ -124,6 +124,9 @@ static int process_rdma(struct rtrs_srv *sess, ...@@ -124,6 +124,9 @@ static int process_rdma(struct rtrs_srv *sess,
struct rnbd_srv_sess_dev *sess_dev; struct rnbd_srv_sess_dev *sess_dev;
u32 dev_id; u32 dev_id;
int err; int err;
struct rnbd_dev_blk_io *io;
struct bio *bio;
short prio;
priv = kmalloc(sizeof(*priv), GFP_KERNEL); priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) if (!priv)
...@@ -142,18 +145,29 @@ static int process_rdma(struct rtrs_srv *sess, ...@@ -142,18 +145,29 @@ static int process_rdma(struct rtrs_srv *sess,
priv->sess_dev = sess_dev; priv->sess_dev = sess_dev;
priv->id = id; priv->id = id;
err = rnbd_dev_submit_io(sess_dev->rnbd_dev, le64_to_cpu(msg->sector), /* Generate bio with pages pointing to the rdma buffer */
data, datalen, le32_to_cpu(msg->bi_size), bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
le32_to_cpu(msg->rw), if (IS_ERR(bio)) {
srv_sess->ver < RNBD_PROTO_VER_MAJOR || rnbd_srv_err(sess_dev, "Failed to generate bio, err: %ld\n", PTR_ERR(bio));
usrlen < sizeof(*msg) ?
0 : le16_to_cpu(msg->prio), priv);
if (unlikely(err)) {
rnbd_srv_err(sess_dev, "Submitting I/O to device failed, err: %d\n",
err);
goto sess_dev_put; goto sess_dev_put;
} }
io = container_of(bio, struct rnbd_dev_blk_io, bio);
io->dev = sess_dev->rnbd_dev;
io->priv = priv;
bio->bi_end_io = rnbd_dev_bi_end_io;
bio->bi_private = io;
bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
bio_set_prio(bio, prio);
bio_set_dev(bio, sess_dev->rnbd_dev->bdev);
submit_bio(bio);
return 0; return 0;
sess_dev_put: sess_dev_put:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment