Commit a254d989 authored by Jens Axboe

Merge branch 'nvme-4.14' of git://git.infradead.org/nvme into for-4.14/block-postmerge

Pull NVMe updates from Christoph:

"A few more nvme updates for 4.14:

 - generate a correct default NQN (Daniel Verkamp)
 - metadata passthrough for the NVME_IOCTL_IO_CMD ioctl, as well as
   related fixes and cleanups (Keith)
 - better scalability for connecting to the NVMeOF target (Roland Dreier)
 - target support for reading the host identifier (Omri Mann)"
parents 40326d8a 40a5fce4
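
A note on the metadata passthrough item before the diff itself: with this series, NVME_IOCTL_IO_CMD honors the metadata and metadata_len fields of struct nvme_passthru_cmd instead of silently ignoring them. The sketch below is a hedged userspace illustration of that path, not code from the patch; the device path, namespace format (4K data blocks plus 8-byte DIF tuples), and LBA are assumptions.

/*
 * Hypothetical example: read one LBA and its per-block metadata
 * through the I/O passthrough ioctl.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
        uint8_t data[4096], meta[8];    /* one 4K block + one DIF tuple */
        struct nvme_passthru_cmd cmd;
        int fd = open("/dev/nvme0n1", O_RDONLY);        /* assumed device */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = 0x02;                      /* NVMe I/O Read */
        cmd.nsid = 1;
        cmd.addr = (__u64)(uintptr_t)data;
        cmd.data_len = sizeof(data);
        cmd.metadata = (__u64)(uintptr_t)meta;  /* honored after this series */
        cmd.metadata_len = sizeof(meta);
        cmd.cdw10 = 0;                          /* starting LBA, low 32 bits */
        cmd.cdw12 = 0;                          /* 0's based count: one block */

        if (ioctl(fd, NVME_IOCTL_IO_CMD, &cmd) < 0)
                perror("NVME_IOCTL_IO_CMD");
        return 0;
}

As the hunks below show, this ioctl path passes an integrity seed of 0, while the pre-existing NVME_IOCTL_SUBMIT_IO path seeds the integrity payload with io.slba.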
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -600,10 +600,44 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
-int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void __user *ubuffer, unsigned bufflen,
-		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
-		u32 *result, unsigned timeout)
+static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
+		unsigned len, u32 seed, bool write)
+{
+	struct bio_integrity_payload *bip;
+	int ret = -ENOMEM;
+	void *buf;
+
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf)
+		goto out;
+
+	ret = -EFAULT;
+	if (write && copy_from_user(buf, ubuf, len))
+		goto out_free_meta;
+
+	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
+	if (IS_ERR(bip)) {
+		ret = PTR_ERR(bip);
+		goto out_free_meta;
+	}
+
+	bip->bip_iter.bi_size = len;
+	bip->bip_iter.bi_sector = seed;
+	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
+			offset_in_page(buf));
+	if (ret == len)
+		return buf;
+	ret = -ENOMEM;
+out_free_meta:
+	kfree(buf);
+out:
+	return ERR_PTR(ret);
+}
+
+static int nvme_submit_user_cmd(struct request_queue *q,
+		struct nvme_command *cmd, void __user *ubuffer,
+		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+		u32 meta_seed, u32 *result, unsigned timeout)
 {
 	bool write = nvme_is_write(cmd);
 	struct nvme_ns *ns = q->queuedata;
@@ -625,46 +659,17 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		if (ret)
 			goto out;
 		bio = req->bio;
-
-		if (!disk)
-			goto submit;
 		bio->bi_disk = disk;
-
-		if (meta_buffer && meta_len) {
-			struct bio_integrity_payload *bip;
-
-			meta = kmalloc(meta_len, GFP_KERNEL);
-			if (!meta) {
-				ret = -ENOMEM;
+		if (disk && meta_buffer && meta_len) {
+			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
+					meta_seed, write);
+			if (IS_ERR(meta)) {
+				ret = PTR_ERR(meta);
 				goto out_unmap;
 			}
-
-			if (write) {
-				if (copy_from_user(meta, meta_buffer,
-						meta_len)) {
-					ret = -EFAULT;
-					goto out_free_meta;
-				}
-			}
-
-			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
-			if (IS_ERR(bip)) {
-				ret = PTR_ERR(bip);
-				goto out_free_meta;
-			}
-
-			bip->bip_iter.bi_size = meta_len;
-			bip->bip_iter.bi_sector = meta_seed;
-
-			ret = bio_integrity_add_page(bio, virt_to_page(meta),
-					meta_len, offset_in_page(meta));
-			if (ret != meta_len) {
-				ret = -ENOMEM;
-				goto out_free_meta;
-			}
 		}
 	}
 
-submit:
 	blk_execute_rq(req->q, disk, req, 0);
 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
 		ret = -EINTR;
@@ -676,7 +681,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		if (copy_to_user(meta_buffer, meta, meta_len))
 			ret = -EFAULT;
 	}
- out_free_meta:
 	kfree(meta);
  out_unmap:
 	if (bio)
@@ -686,14 +690,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 	return ret;
 }
 
-int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void __user *ubuffer, unsigned bufflen, u32 *result,
-		unsigned timeout)
-{
-	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
-			result, timeout);
-}
-
 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
 	struct nvme_ctrl *ctrl = rq->end_io_data;
@@ -983,7 +979,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.apptag = cpu_to_le16(io.apptag);
 	c.rw.appmask = cpu_to_le16(io.appmask);
 
-	return __nvme_submit_user_cmd(ns->queue, &c,
+	return nvme_submit_user_cmd(ns->queue, &c,
 			(void __user *)(uintptr_t)io.addr, length,
 			metadata, meta_len, io.slba, NULL, 0);
 }
@@ -1021,7 +1017,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
 			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-			&cmd.result, timeout);
+			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
+			0, &cmd.result, timeout);
 	if (status >= 0) {
 		if (put_user(cmd.result, &ucmd->result))
 			return -EFAULT;
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -22,7 +22,7 @@
 #include "fabrics.h"
 
 static LIST_HEAD(nvmf_transports);
-static DEFINE_MUTEX(nvmf_transports_mutex);
+static DECLARE_RWSEM(nvmf_transports_rwsem);
 
 static LIST_HEAD(nvmf_hosts);
 static DEFINE_MUTEX(nvmf_hosts_mutex);
@@ -75,7 +75,7 @@ static struct nvmf_host *nvmf_host_default(void)
 	kref_init(&host->ref);
 	snprintf(host->nqn, NVMF_NQN_SIZE,
-		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
+		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
 
 	mutex_lock(&nvmf_hosts_mutex);
 	list_add_tail(&host->list, &nvmf_hosts);
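
For a concrete before/after: the generated default host NQN used to come out as "nqn.2014-08.org.nvmexpress:NVMf:uuid:<uuid>", but the UUID-based NQN format defined by the NVMe over Fabrics spec contains no "NVMf:" token, so the default is now the spec-compliant "nqn.2014-08.org.nvmexpress:uuid:<uuid>".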
@@ -495,9 +495,9 @@ int nvmf_register_transport(struct nvmf_transport_ops *ops)
 	if (!ops->create_ctrl)
 		return -EINVAL;
 
-	mutex_lock(&nvmf_transports_mutex);
+	down_write(&nvmf_transports_rwsem);
 	list_add_tail(&ops->entry, &nvmf_transports);
-	mutex_unlock(&nvmf_transports_mutex);
+	up_write(&nvmf_transports_rwsem);
 
 	return 0;
 }
@@ -514,9 +514,9 @@ EXPORT_SYMBOL_GPL(nvmf_register_transport);
  */
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
 {
-	mutex_lock(&nvmf_transports_mutex);
+	down_write(&nvmf_transports_rwsem);
 	list_del(&ops->entry);
-	mutex_unlock(&nvmf_transports_mutex);
+	up_write(&nvmf_transports_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
@@ -525,7 +525,7 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 {
 	struct nvmf_transport_ops *ops;
 
-	lockdep_assert_held(&nvmf_transports_mutex);
+	lockdep_assert_held(&nvmf_transports_rwsem);
 
 	list_for_each_entry(ops, &nvmf_transports, entry) {
 		if (strcmp(ops->name, opts->transport) == 0)
@@ -851,7 +851,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		goto out_free_opts;
 	opts->mask &= ~NVMF_REQUIRED_OPTS;
 
-	mutex_lock(&nvmf_transports_mutex);
+	down_read(&nvmf_transports_rwsem);
 	ops = nvmf_lookup_transport(opts);
 	if (!ops) {
 		pr_info("no handler found for transport %s.\n",
@@ -878,16 +878,16 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		dev_warn(ctrl->device,
 			"controller returned incorrect NQN: \"%s\".\n",
 			ctrl->subnqn);
-		mutex_unlock(&nvmf_transports_mutex);
+		up_read(&nvmf_transports_rwsem);
 		ctrl->ops->delete_ctrl(ctrl);
 		return ERR_PTR(-EINVAL);
 	}
 
-	mutex_unlock(&nvmf_transports_mutex);
+	up_read(&nvmf_transports_rwsem);
 	return ctrl;
 
 out_unlock:
-	mutex_unlock(&nvmf_transports_mutex);
+	up_read(&nvmf_transports_rwsem);
 out_free_opts:
 	nvmf_free_options(opts);
 	return ERR_PTR(ret);
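
The mutex-to-rwsem conversion in this file is the scalability item from the pull message: nvmf_create_ctrl() only ever reads the transport list, and it holds the lock across ->create_ctrl(), so with a mutex every connect attempt serialized behind any other. With the rwsem, connects take the lock shared and run in parallel, and only transport (un)registration needs it exclusive. A hedged, kernel-style sketch of the pattern (names are illustrative, not from the patch):

#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/string.h>

/* Illustrative stand-ins for the nvmf transport registry. */
struct demo_ops {
        const char              *name;
        struct list_head        entry;
};

static DECLARE_RWSEM(demo_rwsem);
static LIST_HEAD(demo_list);

/* Readers take the semaphore shared: any number can search at once. */
static struct demo_ops *demo_lookup(const char *name)
{
        struct demo_ops *ops, *found = NULL;

        down_read(&demo_rwsem);
        list_for_each_entry(ops, &demo_list, entry) {
                if (strcmp(ops->name, name) == 0) {
                        found = ops;
                        break;
                }
        }
        up_read(&demo_rwsem);
        return found;
}

/* Writers take it exclusive, excluding readers and other writers. */
static void demo_register(struct demo_ops *ops)
{
        down_write(&demo_rwsem);
        list_add_tail(&ops->entry, &demo_list);
        up_write(&demo_rwsem);
}

Registration is rare and connect attempts are the hot path, which is exactly the read-mostly workload rw-semaphores are meant for.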
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -314,13 +314,6 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head, int flags);
-int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void __user *ubuffer, unsigned bufflen, u32 *result,
-		unsigned timeout);
-int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void __user *ubuffer, unsigned bufflen,
-		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
-		u32 *result, unsigned timeout);
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -668,7 +668,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
 			goto out_unmap;
 
-		if (rq_data_dir(req))
+		if (req_op(req) == REQ_OP_WRITE)
 			nvme_dif_remap(req, nvme_dif_prep);
 
 		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
@@ -696,7 +696,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 	if (iod->nents) {
 		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
 		if (blk_integrity_rq(req)) {
-			if (!rq_data_dir(req))
+			if (req_op(req) == REQ_OP_READ)
 				nvme_dif_remap(req, nvme_dif_complete);
 			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
 		}
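
The two pci.c hunks above replace direction tests with explicit operation checks around the T10 DIF remapping. rq_data_dir() reduces every operation to READ or WRITE; the 4.14-era block-layer definition is approximately the following, so any "write-like" op (a discard, for instance) would satisfy the old test:

#define rq_data_dir(rq)	(op_is_write(req_op(rq)) ? WRITE : READ)

Checking req_op(req) == REQ_OP_WRITE / REQ_OP_READ states directly that only data reads and writes carry protection information to remap.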
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -443,7 +443,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 	u32 val32;
 	u16 status = 0;
 
-	switch (cdw10 & 0xf) {
+	switch (cdw10 & 0xff) {
 	case NVME_FEAT_NUM_QUEUES:
 		nvmet_set_result(req,
 			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
@@ -453,6 +453,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
 		nvmet_set_result(req, req->sq->ctrl->kato);
 		break;
+	case NVME_FEAT_HOST_ID:
+		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+		break;
 	default:
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
@@ -467,7 +470,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
 	u16 status = 0;
 
-	switch (cdw10 & 0xf) {
+	switch (cdw10 & 0xff) {
 	/*
 	 * These features are mandatory in the spec, but we don't
 	 * have a useful way to implement them.  We'll eventually
@@ -501,6 +504,16 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
 	case NVME_FEAT_KATO:
 		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
 		break;
+	case NVME_FEAT_HOST_ID:
+		/* need 128-bit host identifier flag */
+		if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			break;
+		}
+
+		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
+				sizeof(req->sq->ctrl->hostid));
+		break;
 	default:
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
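
On the host side, the new target code can be exercised with a Get Features admin command for the Host Identifier feature (FID 0x81), with CDW11 bit 0 set to request the 128-bit form; that is the bit the cdw10[1] check above tests, since the cdw10[] array spans CDW10-CDW15. A hedged userspace sketch via the admin passthrough ioctl; the controller character device path is an assumption:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
        uint8_t hostid[16];     /* 128-bit host identifier */
        struct nvme_passthru_cmd cmd;
        int fd = open("/dev/nvme0", O_RDONLY);  /* assumed controller device */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = 0x0a;              /* Get Features */
        cmd.cdw10 = 0x81;               /* FID: Host Identifier */
        cmd.cdw11 = 1 << 0;             /* request the 128-bit form */
        cmd.addr = (__u64)(uintptr_t)hostid;
        cmd.data_len = sizeof(hostid);

        if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
                perror("NVME_IOCTL_ADMIN_CMD");
        return 0;
}

Set Features for the same FID is rejected with NVME_SC_CMD_SEQ_ERROR because, on fabrics, the host identifier is established by the Connect command; the fabrics-cmd.c hunk below is where the target copies it out of the connect data.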
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -154,6 +154,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 			le32_to_cpu(c->kato), &ctrl);
 	if (status)
 		goto out;
+	uuid_copy(&ctrl->hostid, &d->hostid);
 
 	status = nvmet_install_queue(ctrl, req);
 	if (status) {
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -115,6 +115,7 @@ struct nvmet_ctrl {
 	u32			cc;
 	u32			csts;
 
+	uuid_t			hostid;
 	u16			cntlid;
 	u32			kato;