Commit 456cba38 authored by Kanchan Joshi, committed by Jens Axboe

nvme: wire-up uring-cmd support for io-passthru on char-device.

Introduce a handler for fops->uring_cmd(), implementing async passthru
on the char device (/dev/ngX). The handler supports the newly
introduced NVME_URING_CMD_IO operation, which operates on a new
structure, nvme_uring_cmd. This structure is similar to struct
nvme_passthru_cmd64 but lacks the embedded 8-byte result field; that
field is not needed since uring-cmd can return an additional result via
the big CQE.
Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220511054750.20432-5-joshi.k@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bcad2565
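
For context, a minimal userspace sketch of driving NVME_URING_CMD_IO. This
is not part of the commit; it assumes liburing >= 2.2 built against 5.19-era
uapi headers (for sqe->cmd, sqe->cmd_op and cqe->big_cqe), a generic char
node at /dev/ng0n1 with an active namespace 1 and 4096-byte logical blocks,
and root privileges, since the handler below requires CAP_SYS_ADMIN. Error
handling is trimmed:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	void *buf;
	int fd;

	/* passthru requires the big SQE/CQE ring variants */
	if (io_uring_queue_init(8, &ring,
				IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
		return 1;

	fd = open("/dev/ng0n1", O_RDONLY);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, sizeof(*sqe) + 64);	/* zero the full 128-byte SQE */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;

	/* the nvme_uring_cmd payload lives in the SQE128 command area */
	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode = 0x02;		/* NVMe Read */
	cmd->nsid = 1;
	cmd->addr = (uintptr_t)buf;
	cmd->data_len = 4096;
	cmd->cdw10 = 0;			/* starting LBA, lower 32 bits */
	cmd->cdw12 = 0;			/* NLB is 0-based: one block */

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);

	/* status comes back in cqe->res; the 64-bit NVMe result rides in
	 * the extra dwords of the 32-byte CQE, as the commit message notes */
	printf("status %d result %llu\n", cqe->res,
	       (unsigned long long)cqe->big_cqe[0]);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}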
@@ -3699,6 +3699,7 @@ static const struct file_operations nvme_ns_chr_fops = {
	.release	= nvme_ns_chr_release,
	.unlocked_ioctl	= nvme_ns_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
+	.uring_cmd	= nvme_ns_chr_uring_cmd,
};

static int nvme_add_ns_cdev(struct nvme_ns *ns)
...
@@ -5,6 +5,7 @@
 */
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
+#include <linux/io_uring.h>

#include "nvme.h"

/*
@@ -66,7 +67,8 @@ static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		u32 meta_seed, void **metap, unsigned timeout, bool vec)
+		u32 meta_seed, void **metap, unsigned timeout, bool vec,
+		unsigned int rq_flags, blk_mq_req_flags_t blk_flags)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
@@ -76,7 +78,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
	void *meta = NULL;
	int ret;

-	req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
+	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
@@ -140,7 +142,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
	int ret;

	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
-			meta_len, meta_seed, &meta, timeout, vec);
+			meta_len, meta_seed, &meta, timeout, vec, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
@@ -330,6 +332,139 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
	return status;
}

struct nvme_uring_data {
	__u64	metadata;
	__u64	addr;
	__u32	data_len;
	__u32	metadata_len;
	__u32	timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
	union {
		struct bio *bio;
		struct request *req;
	};
	void *meta; /* kernel-resident buffer */
	void __user *meta_buffer;
	u32 meta_len;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = pdu->req;
	struct bio *bio = req->bio;
	int status;
	u64 result;

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		status = -EINTR;
	else
		status = nvme_req(req)->status;
	result = le64_to_cpu(nvme_req(req)->result.u64);

	if (pdu->meta)
		status = nvme_finish_user_metadata(req, pdu->meta_buffer,
					pdu->meta, pdu->meta_len, status);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);

	io_uring_cmd_done(ioucmd, status, result);
}

static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	/* extract bio before reusing the same field for request */
	struct bio *bio = pdu->bio;

	pdu->req = req;
	req->bio = bio;
	/* this takes care of moving rest of completion-work to task context */
	io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
}

static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	const struct nvme_uring_cmd *cmd = ioucmd->cmd;
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	struct nvme_uring_data d;
	struct nvme_command c;
	struct request *req;
	unsigned int rq_flags = 0;
	blk_mq_req_flags_t blk_flags = 0;
	void *meta = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;
	c.common.command_id = 0;
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	c.common.metadata = 0;
	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags = REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}

	req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
			d.data_len, nvme_to_user_ptr(d.metadata),
			d.metadata_len, 0, &meta, d.timeout_ms ?
			msecs_to_jiffies(d.timeout_ms) : 0, 0, rq_flags,
			blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->end_io_data = ioucmd;

	/* to free bio on completion, as req->bio will be null at that time */
	pdu->bio = req->bio;
	pdu->meta = meta;
	pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
	pdu->meta_len = d.metadata_len;

	blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
	return -EIOCBQUEUED;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
@@ -421,6 +556,42 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

	/* IOPOLL not supported yet */
	if (issue_flags & IO_URING_F_IOPOLL)
		return -EOPNOTSUPP;

	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_IO:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);

	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -487,6 +658,21 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

#endif /* CONFIG_NVME_MULTIPATH */

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
...
@@ -437,6 +437,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
	.release	= nvme_ns_head_chr_release,
	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
+	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
};

static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
...
@@ -782,6 +782,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
+int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+		unsigned int issue_flags);
+int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+		unsigned int issue_flags);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
...
@@ -70,6 +70,28 @@ struct nvme_passthru_cmd64 {
	__u64	result;
};

/* same as struct nvme_passthru_cmd64, minus the 8b result field */
struct nvme_uring_cmd {
	__u8	opcode;
	__u8	flags;
	__u16	rsvd1;
	__u32	nsid;
	__u32	cdw2;
	__u32	cdw3;
	__u64	metadata;
	__u64	addr;
	__u32	metadata_len;
	__u32	data_len;
	__u32	cdw10;
	__u32	cdw11;
	__u32	cdw12;
	__u32	cdw13;
	__u32	cdw14;
	__u32	cdw15;
	__u32	timeout_ms;
	__u32	rsvd2;
};

#define nvme_admin_cmd nvme_passthru_cmd

#define NVME_IOCTL_ID		_IO('N', 0x40)
@@ -83,4 +105,7 @@ struct nvme_passthru_cmd64 {
#define NVME_IOCTL_IO64_CMD	_IOWR('N', 0x48, struct nvme_passthru_cmd64)
#define NVME_IOCTL_IO64_CMD_VEC	_IOWR('N', 0x49, struct nvme_passthru_cmd64)

/* io_uring async commands: */
#define NVME_URING_CMD_IO	_IOWR('N', 0x80, struct nvme_uring_cmd)

#endif /* _UAPI_LINUX_NVME_IOCTL_H */
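
Since the new uapi struct drops only the trailing 8-byte result, it is
72 bytes, which fits in the 80-byte command area of a 128-byte SQE. An
illustrative compile-time sanity check, not part of the patch:

#include <linux/nvme_ioctl.h>

/* nvme_passthru_cmd64 is 80 bytes; dropping the 8-byte result leaves 72,
 * within the 80 bytes of command space that an SQE128 provides. */
_Static_assert(sizeof(struct nvme_uring_cmd) == 72,
	       "unexpected nvme_uring_cmd ABI size");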