Commit 585079b6 authored by Kanchan Joshi, committed by Jens Axboe

nvme: wire up async polling for io passthrough commands

Store a cookie during submission, and use that to implement
completion-polling inside the ->uring_cmd_iopoll handler.
The handler makes use of the existing bio poll facility.
Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
Link: https://lore.kernel.org/r/20220823161443.49436-5-joshi.k@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c6e99ea4
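For context, here is a minimal userspace sketch (not part of this patch) of how polled passthrough I/O could be exercised against the per-namespace generic char device once this support is in place. The device path, LBA, and transfer size are illustrative assumptions; the ring must be created with IORING_SETUP_IOPOLL in addition to the big SQE/CQE flags that NVMe passthrough already requires, headers from a 5.19+ kernel/liburing are assumed, and passthrough commands typically need root privileges.

/*
 * Hypothetical example: one polled NVMe passthrough read via io_uring.
 * Not part of this patch; device path, LBA and sizes are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct io_uring_params p = { 0 };
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	void *buf;
	int fd, nsid, ret;

	/* generic char node (ng*) for the namespace; assumed path */
	fd = open("/dev/ng0n1", O_RDONLY);
	if (fd < 0)
		return 1;
	nsid = ioctl(fd, NVME_IOCTL_ID);
	if (nsid < 0)
		return 1;

	/* polled completions + 128B SQEs / 32B CQEs for passthrough */
	p.flags = IORING_SETUP_IOPOLL | IORING_SETUP_SQE128 | IORING_SETUP_CQE32;
	if (io_uring_queue_init_params(8, &ring, &p))
		return 1;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;
	sqe->user_data = 0x1234;

	/* NVMe read of one block at LBA 0 (assumes a 4KiB LBA format) */
	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = 0x02;		/* nvme_cmd_read */
	cmd->nsid = nsid;
	cmd->addr = (unsigned long long)buf;
	cmd->data_len = 4096;
	cmd->cdw10 = 0;			/* starting LBA, low 32 bits */
	cmd->cdw11 = 0;			/* starting LBA, high 32 bits */
	cmd->cdw12 = 0;			/* number of LBAs, 0-based */

	io_uring_submit(&ring);

	/* on an IOPOLL ring this reaps via the new ->uring_cmd_iopoll path */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret)
		printf("cqe res %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}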
@@ -3976,6 +3976,7 @@ static const struct file_operations nvme_ns_chr_fops = {
 	.unlocked_ioctl	= nvme_ns_chr_ioctl,
 	.compat_ioctl	= compat_ptr_ioctl,
 	.uring_cmd	= nvme_ns_chr_uring_cmd,
+	.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
 };
 
 static int nvme_add_ns_cdev(struct nvme_ns *ns)
@@ -391,11 +391,19 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 	/* extract bio before reusing the same field for request */
 	struct bio *bio = pdu->bio;
+	void *cookie = READ_ONCE(ioucmd->cookie);
 
 	pdu->req = req;
 	req->bio = bio;
-	/* this takes care of moving rest of completion-work to task context */
-	io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+	/*
+	 * For iopoll, complete it directly.
+	 * Otherwise, move the completion to task work.
+	 */
+	if (cookie != NULL && blk_rq_is_poll(req))
+		nvme_uring_task_cb(ioucmd);
+	else
+		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
 }
 
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -445,7 +453,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		rq_flags = REQ_NOWAIT;
 		blk_flags = BLK_MQ_REQ_NOWAIT;
 	}
+	if (issue_flags & IO_URING_F_IOPOLL)
+		rq_flags |= REQ_POLLED;
 
+retry:
 	req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
 			d.data_len, nvme_to_user_ptr(d.metadata),
 			d.metadata_len, 0, &meta, d.timeout_ms ?
@@ -456,6 +467,17 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	req->end_io = nvme_uring_cmd_end_io;
 	req->end_io_data = ioucmd;
+	if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
+		if (unlikely(!req->bio)) {
+			/* we can't poll this, so alloc regular req instead */
+			blk_mq_free_request(req);
+			rq_flags &= ~REQ_POLLED;
+			goto retry;
+		} else {
+			WRITE_ONCE(ioucmd->cookie, req->bio);
+			req->bio->bi_opf |= REQ_POLLED;
+		}
+	}
 
 	/* to free bio on completion, as req->bio will be null at that time */
 	pdu->bio = req->bio;
 	pdu->meta = meta;
@@ -559,9 +581,6 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 static int nvme_uring_cmd_checks(unsigned int issue_flags)
 {
-	/* IOPOLL not supported yet */
-	if (issue_flags & IO_URING_F_IOPOLL)
-		return -EOPNOTSUPP;
 
 	/* NVMe passthrough requires big SQE/CQE support */
 	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
@@ -604,6 +623,23 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
 }
 
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd)
+{
+	struct bio *bio;
+	int ret = 0;
+	struct nvme_ns *ns;
+	struct request_queue *q;
+
+	rcu_read_lock();
+	bio = READ_ONCE(ioucmd->cookie);
+	ns = container_of(file_inode(ioucmd->file)->i_cdev,
+			struct nvme_ns, cdev);
+	q = ns->queue;
+	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
+		ret = bio_poll(bio, NULL, 0);
+	rcu_read_unlock();
+	return ret;
+}
 #ifdef CONFIG_NVME_MULTIPATH
 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
 		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -685,6 +721,29 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
 	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
 }
+
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd)
+{
+	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+	int srcu_idx = srcu_read_lock(&head->srcu);
+	struct nvme_ns *ns = nvme_find_path(head);
+	struct bio *bio;
+	int ret = 0;
+	struct request_queue *q;
+
+	if (ns) {
+		rcu_read_lock();
+		bio = READ_ONCE(ioucmd->cookie);
+		q = ns->queue;
+		if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
+				&& bio->bi_bdev)
+			ret = bio_poll(bio, NULL, 0);
+		rcu_read_unlock();
+	}
+	srcu_read_unlock(&head->srcu, srcu_idx);
+	return ret;
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
@@ -692,6 +751,10 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
 	int ret;
 
+	/* IOPOLL not supported yet */
+	if (issue_flags & IO_URING_F_IOPOLL)
+		return -EOPNOTSUPP;
+
 	ret = nvme_uring_cmd_checks(issue_flags);
 	if (ret)
 		return ret;
@@ -439,6 +439,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
 	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
 	.compat_ioctl	= compat_ptr_ioctl,
 	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
+	.uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
 };
 
 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
@@ -821,6 +821,8 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
 		unsigned long arg);
 long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 		unsigned long arg);
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd);
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd);
 int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
 		unsigned int issue_flags);
 int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,