Commit eca53cb6 authored by Jens Axboe

Merge branch 'nvme-4.19' of git://git.infradead.org/nvme into for-4.19/block

Pull NVMe updates from Christoph:

"Highlights:

 - massively improved tracepoints (Keith Busch)
 - support for larger inline data in the RDMA host and target
   (Steve Wise)
 - RDMA setup/teardown path fixes and refactor (Sagi Grimberg)
 - Command Supported and Effects log support for the NVMe target
   (Chaitanya Kulkarni)
 - buffered I/O support for the NVMe target (Chaitanya Kulkarni)

 plus the usual set of cleanups and small enhancements."

* 'nvme-4.19' of git://git.infradead.org/nvme:
  nvmet: don't use uuid_le type
  nvmet: check fileio lba range access boundaries
  nvmet: fix file discard return status
  nvme-rdma: centralize admin/io queue teardown sequence
  nvme-rdma: centralize controller setup sequence
  nvme-rdma: unquiesce queues when deleting the controller
  nvme-rdma: mark expected switch fall-through
  nvme: add disk name to trace events
  nvme: add controller name to trace events
  nvme: use hw qid in trace events
  nvme: cache struct nvme_ctrl reference to struct nvme_request
  nvmet-rdma: add an error flow for post_recv failures
  nvmet-rdma: add unlikely check in the fast path
  nvmet-rdma: support max(16KB, PAGE_SIZE) inline data
  nvme-rdma: support up to 4 segments of inline data
  nvmet: add buffered I/O support for file backed ns
  nvmet: add commands supported and effects log page
  nvme: move init of keep_alive work item to controller initialization
  nvme.h: resync with nvme-cli
parents 42c9cdfe 1b0d2745
@@ -652,10 +652,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
}
cmd->common.command_id = req->tag;
if (ns)
trace_nvme_setup_nvm_cmd(req->q->id, cmd);
else
trace_nvme_setup_admin_cmd(cmd);
trace_nvme_setup_cmd(req, cmd);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -848,9 +845,6 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
if (unlikely(ctrl->kato == 0))
return;
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
@@ -3484,6 +3478,10 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
goto out;
......
@@ -1737,6 +1737,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
nvme_req(rq)->ctrl = &ctrl->ctrl;
return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
......
@@ -102,6 +102,7 @@ struct nvme_request {
u8 retries;
u8 flags;
u16 status;
struct nvme_ctrl *ctrl;
};
/*
@@ -119,6 +120,13 @@ static inline struct nvme_request *nvme_req(struct request *req)
return blk_mq_rq_to_pdu(req);
}
static inline u16 nvme_req_qid(struct request *req)
{
if (!req->rq_disk)
return 0;
return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
}
/* The below value is the specific amount of delay needed before checking
* readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
* NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
......
@@ -418,6 +418,8 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
nvme_req(req)->ctrl = &dev->ctrl;
return 0;
}
......
@@ -128,3 +128,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
return nvme_trace_common(p, cdw10);
}
}
const char *nvme_trace_disk_name(struct trace_seq *p, char *name)
{
const char *ret = trace_seq_buffer_ptr(p);
if (*name)
trace_seq_printf(p, "disk=%s, ", name);
trace_seq_putc(p, 0);
return ret;
}
@@ -50,13 +50,8 @@
nvme_admin_opcode_name(nvme_admin_security_recv), \
nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
u8 *cdw10);
#define __parse_nvme_admin_cmd(opcode, cdw10) \
nvme_trace_parse_admin_cmd(p, opcode, cdw10)
#define nvme_opcode_name(opcode) { opcode, #opcode }
#define show_opcode_name(val) \
#define show_nvm_opcode_name(val) \
__print_symbolic(val, \
nvme_opcode_name(nvme_cmd_flush), \
nvme_opcode_name(nvme_cmd_write), \
@@ -70,85 +65,92 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
nvme_opcode_name(nvme_cmd_resv_acquire), \
nvme_opcode_name(nvme_cmd_resv_release))
const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
u8 *cdw10);
#define __parse_nvme_cmd(opcode, cdw10) \
nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
TRACE_EVENT(nvme_setup_admin_cmd,
TP_PROTO(struct nvme_command *cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field(u8, opcode)
__field(u8, flags)
__field(u16, cid)
__field(u64, metadata)
__array(u8, cdw10, 24)
),
TP_fast_assign(
__entry->opcode = cmd->common.opcode;
__entry->flags = cmd->common.flags;
__entry->cid = cmd->common.command_id;
__entry->metadata = le64_to_cpu(cmd->common.metadata);
memcpy(__entry->cdw10, cmd->common.cdw10,
sizeof(__entry->cdw10));
),
TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
__entry->cid, __entry->flags, __entry->metadata,
show_admin_opcode_name(__entry->opcode),
__parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
);
#define show_opcode_name(qid, opcode) \
(qid ? show_nvm_opcode_name(opcode) : show_admin_opcode_name(opcode))
TRACE_EVENT(nvme_setup_nvm_cmd,
TP_PROTO(int qid, struct nvme_command *cmd),
TP_ARGS(qid, cmd),
const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
u8 *cdw10);
const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
u8 *cdw10);
#define parse_nvme_cmd(qid, opcode, cdw10) \
(qid ? \
nvme_trace_parse_nvm_cmd(p, opcode, cdw10) : \
nvme_trace_parse_admin_cmd(p, opcode, cdw10))
const char *nvme_trace_disk_name(struct trace_seq *p, char *name);
#define __print_disk_name(name) \
nvme_trace_disk_name(p, name)
#ifndef TRACE_HEADER_MULTI_READ
static inline void __assign_disk_name(char *name, struct gendisk *disk)
{
if (disk)
memcpy(name, disk->disk_name, DISK_NAME_LEN);
else
memset(name, 0, DISK_NAME_LEN);
}
#endif
TRACE_EVENT(nvme_setup_cmd,
TP_PROTO(struct request *req, struct nvme_command *cmd),
TP_ARGS(req, cmd),
TP_STRUCT__entry(
__field(int, qid)
__field(u8, opcode)
__field(u8, flags)
__field(u16, cid)
__field(u32, nsid)
__field(u64, metadata)
__array(u8, cdw10, 24)
__array(char, disk, DISK_NAME_LEN)
__field(int, ctrl_id)
__field(int, qid)
__field(u8, opcode)
__field(u8, flags)
__field(u16, cid)
__field(u32, nsid)
__field(u64, metadata)
__array(u8, cdw10, 24)
),
TP_fast_assign(
__entry->qid = qid;
__entry->opcode = cmd->common.opcode;
__entry->flags = cmd->common.flags;
__entry->cid = cmd->common.command_id;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = le64_to_cpu(cmd->common.metadata);
memcpy(__entry->cdw10, cmd->common.cdw10,
sizeof(__entry->cdw10));
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
__entry->qid = nvme_req_qid(req);
__entry->opcode = cmd->common.opcode;
__entry->flags = cmd->common.flags;
__entry->cid = cmd->common.command_id;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = le64_to_cpu(cmd->common.metadata);
__assign_disk_name(__entry->disk, req->rq_disk);
memcpy(__entry->cdw10, cmd->common.cdw10,
sizeof(__entry->cdw10));
),
TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
__entry->qid, __entry->nsid, __entry->cid,
TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
__entry->ctrl_id, __print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
show_opcode_name(__entry->opcode),
__parse_nvme_cmd(__entry->opcode, __entry->cdw10))
show_opcode_name(__entry->qid, __entry->opcode),
parse_nvme_cmd(__entry->qid, __entry->opcode, __entry->cdw10))
);
TRACE_EVENT(nvme_complete_rq,
TP_PROTO(struct request *req),
TP_ARGS(req),
TP_STRUCT__entry(
__field(int, qid)
__field(int, cid)
__field(u64, result)
__field(u8, retries)
__field(u8, flags)
__field(u16, status)
__array(char, disk, DISK_NAME_LEN)
__field(int, ctrl_id)
__field(int, qid)
__field(int, cid)
__field(u64, result)
__field(u8, retries)
__field(u8, flags)
__field(u16, status)
),
TP_fast_assign(
__entry->qid = req->q->id;
__entry->cid = req->tag;
__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
__entry->status = nvme_req(req)->status;
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
__entry->qid = nvme_req_qid(req);
__entry->cid = req->tag;
__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
__entry->status = nvme_req(req)->status;
__assign_disk_name(__entry->disk, req->rq_disk);
),
TP_printk("qid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
__entry->ctrl_id, __print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->result,
__entry->retries, __entry->flags, __entry->status)
......
@@ -128,6 +128,36 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
u16 status = NVME_SC_INTERNAL;
struct nvme_effects_log *log;
log = kzalloc(sizeof(*log), GFP_KERNEL);
if (!log)
goto out;
log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
kfree(log);
out:
nvmet_req_complete(req, status);
}
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -208,7 +238,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* first slot is read-only, only one slot supported */
id->frmw = (1 << 0) | (1 << 1);
id->lpa = (1 << 0) | (1 << 2);
id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
id->npss = 0;
@@ -238,14 +268,14 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->has_keyed_sgls)
id->sgls |= cpu_to_le32(1 << 2);
if (ctrl->ops->sqe_inline_size)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
strcpy(id->subnqn, ctrl->subsys->subsysnqn);
/* Max command capsule size is sqe + single page of in-capsule data */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
ctrl->ops->sqe_inline_size) / 16);
req->port->inline_data_size) / 16);
/* Max response capsule size is cqe */
id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
@@ -308,7 +338,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
*/
id->nmic = (1 << 0);
memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));
memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
id->lbaf[0].ds = ns->blksize_shift;
@@ -586,6 +616,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
case NVME_LOG_CHANGED_NS:
req->execute = nvmet_execute_get_log_changed_ns;
return 0;
case NVME_LOG_CMD_EFFECTS:
req->execute = nvmet_execute_get_log_cmd_effects_ns;
return 0;
}
break;
case nvme_admin_identify:
......
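The effects log entries above encode per-opcode capabilities as 32-bit little-endian words, with bit 0 ("command supported") set for each admin and I/O command the target implements. As an illustration only (not part of this series), a host-side consumer that fetched this log via Get Log Page (NVME_LOG_CMD_EFFECTS) could test that bit roughly as in the hypothetical helper below:

/* Illustrative sketch: check the "command supported" bit (bit 0) of an
 * effects-log entry for a given opcode. struct nvme_effects_log carries
 * one __le32 per admin opcode (acs[]) and per I/O opcode (iocs[]).
 */
static bool nvme_effects_cmd_supported(const struct nvme_effects_log *log,
                                       bool admin, u8 opcode)
{
        __le32 entry = admin ? log->acs[opcode] : log->iocs[opcode];

        return le32_to_cpu(entry) & (1 << 0);
}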
@@ -218,6 +218,35 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, addr_trsvcid);
static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
char *page)
{
struct nvmet_port *port = to_nvmet_port(item);
return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
}
static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_port *port = to_nvmet_port(item);
int ret;
if (port->enabled) {
pr_err("Cannot modify inline_data_size while port enabled\n");
pr_err("Disable the port before modifying\n");
return -EACCES;
}
ret = kstrtoint(page, 0, &port->inline_data_size);
if (ret) {
pr_err("Invalid value '%s' for inline_data_size\n", page);
return -EINVAL;
}
return count;
}
CONFIGFS_ATTR(nvmet_, param_inline_data_size);
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
char *page)
{
@@ -407,11 +436,40 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_ns_, enable);
static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
}
static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_ns *ns = to_nvmet_ns(item);
bool val;
if (strtobool(page, &val))
return -EINVAL;
mutex_lock(&ns->subsys->lock);
if (ns->enabled) {
pr_err("disable ns before setting buffered_io value.\n");
mutex_unlock(&ns->subsys->lock);
return -EINVAL;
}
ns->buffered_io = val;
mutex_unlock(&ns->subsys->lock);
return count;
}
CONFIGFS_ATTR(nvmet_ns_, buffered_io);
static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_device_path,
&nvmet_ns_attr_device_nguid,
&nvmet_ns_attr_device_uuid,
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
NULL,
};
@@ -874,6 +932,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_traddr,
&nvmet_attr_addr_trsvcid,
&nvmet_attr_addr_trtype,
&nvmet_attr_param_inline_data_size,
NULL,
};
@@ -903,6 +962,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
INIT_LIST_HEAD(&port->entry);
INIT_LIST_HEAD(&port->subsystems);
INIT_LIST_HEAD(&port->referrals);
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
config_group_init_type_name(&port->group, name, &nvmet_port_type);
......
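The two new attributes live in the existing nvmet configfs hierarchy: param_inline_data_size on a port and buffered_io on a namespace, and both store handlers above reject writes while the port or namespace is enabled. A minimal userspace sketch of driving them is shown below; the port number, subsystem NQN and namespace ID are hypothetical, and it assumes configfs is mounted at /sys/kernel/config:

#include <stdio.h>

/* Write a single value to an nvmet configfs attribute file. */
static int write_attr(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
}

int main(void)
{
        /* Cap the advertised RDMA in-capsule (inline) data at 8KB;
         * must be done before the port is enabled. */
        write_attr("/sys/kernel/config/nvmet/ports/1/param_inline_data_size",
                   "8192");

        /* Switch a file-backed namespace to buffered (page cache) I/O;
         * must be done while the namespace is disabled. */
        write_attr("/sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/buffered_io",
                   "1");
        return 0;
}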
@@ -18,6 +18,7 @@
#include "nvmet.h"
struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
@@ -241,6 +242,10 @@ int nvmet_enable_port(struct nvmet_port *port)
return ret;
}
/* If the transport didn't set inline_data_size, then disable it. */
if (port->inline_data_size < 0)
port->inline_data_size = 0;
port->enabled = true;
return 0;
}
@@ -437,6 +442,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->nsid = nsid;
ns->subsys = subsys;
uuid_gen(&ns->uuid);
ns->buffered_io = false;
return ns;
}
@@ -1109,6 +1115,12 @@ static int __init nvmet_init(void)
{
int error;
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
WQ_MEM_RECLAIM, 0);
if (!buffered_io_wq) {
error = -ENOMEM;
goto out;
}
error = nvmet_init_discovery();
if (error)
goto out;
@@ -1129,6 +1141,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
destroy_workqueue(buffered_io_wq);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
......
@@ -171,7 +171,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->has_keyed_sgls)
id->sgls |= cpu_to_le32(1 << 2);
if (ctrl->ops->sqe_inline_size)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
strcpy(id->subnqn, ctrl->subsys->subsysnqn);
......
@@ -16,6 +16,8 @@
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
if (ns->file) {
if (ns->buffered_io)
flush_workqueue(buffered_io_wq);
mempool_destroy(ns->bvec_pool);
ns->bvec_pool = NULL;
kmem_cache_destroy(ns->bvec_cache);
@@ -27,11 +29,14 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
int ret;
int flags = O_RDWR | O_LARGEFILE;
struct kstat stat;
int ret;
ns->file = filp_open(ns->device_path,
O_RDWR | O_LARGEFILE | O_DIRECT, 0);
if (!ns->buffered_io)
flags |= O_DIRECT;
ns->file = filp_open(ns->device_path, flags, 0);
if (IS_ERR(ns->file)) {
pr_err("failed to open file %s: (%ld)\n",
ns->device_path, PTR_ERR(ns->file));
@@ -100,7 +105,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
iocb->ki_pos = pos;
iocb->ki_filp = req->ns->file;
iocb->ki_flags = IOCB_DIRECT | ki_flags;
iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
ret = call_iter(iocb, &iter);
@@ -140,6 +145,12 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
return;
}
pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
if (unlikely(pos + req->data_len > req->ns->size)) {
nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
return;
}
if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
GFP_KERNEL);
@@ -155,8 +166,6 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
is_sync = true;
}
pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
memset(&req->f.iocb, 0, sizeof(struct kiocb));
for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
@@ -189,6 +198,19 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
}
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
nvmet_file_execute_rw(req);
}
static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
{
INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
queue_work(buffered_io_wq, &req->f.work);
}
static void nvmet_file_flush_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
@@ -209,22 +231,30 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
{
int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
struct nvme_dsm_range range;
loff_t offset;
loff_t len;
int i, ret;
loff_t offset, len;
u16 ret;
int i;
for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
if (nvmet_copy_from_sgl(req, i * sizeof(range), &range,
sizeof(range)))
ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
sizeof(range));
if (ret)
break;
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
ret = vfs_fallocate(req->ns->file, mode, offset, len);
if (ret)
if (offset + len > req->ns->size) {
ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
break;
}
if (vfs_fallocate(req->ns->file, mode, offset, len)) {
ret = NVME_SC_INTERNAL | NVME_SC_DNR;
break;
}
}
nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
nvmet_req_complete(req, ret);
}
static void nvmet_file_dsm_work(struct work_struct *w)
@@ -263,6 +293,11 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
req->ns->blksize_shift);
if (unlikely(offset + len > req->ns->size)) {
nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
return;
}
ret = vfs_fallocate(req->ns->file, mode, offset, len);
nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}
@@ -280,7 +315,10 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
switch (cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_file_execute_rw;
if (req->ns->buffered_io)
req->execute = nvmet_file_execute_rw_buffered_io;
else
req->execute = nvmet_file_execute_rw;
req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
......
@@ -227,6 +227,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
{
struct nvme_loop_ctrl *ctrl = set->driver_data;
nvme_req(req)->ctrl = &ctrl->ctrl;
return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}
......
@@ -65,6 +65,7 @@ struct nvmet_ns {
u8 nguid[16];
uuid_t uuid;
bool buffered_io;
bool enabled;
struct nvmet_subsys *subsys;
const char *device_path;
@@ -116,6 +117,7 @@ struct nvmet_port {
struct list_head referrals;
void *priv;
bool enabled;
int inline_data_size;
};
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
@@ -225,7 +227,6 @@ struct nvmet_req;
struct nvmet_fabrics_ops {
struct module *owner;
unsigned int type;
unsigned int sqe_inline_size;
unsigned int msdbd;
bool has_keyed_sgls : 1;
void (*queue_response)(struct nvmet_req *req);
@@ -269,6 +270,8 @@ struct nvmet_req {
const struct nvmet_fabrics_ops *ops;
};
extern struct workqueue_struct *buffered_io_wq;
static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
{
req->rsp->status = cpu_to_le16(status << 1);
......
@@ -749,6 +749,11 @@ enum {
NVME_FEAT_HOST_MEM_BUF = 0x0d,
NVME_FEAT_TIMESTAMP = 0x0e,
NVME_FEAT_KATO = 0x0f,
NVME_FEAT_HCTM = 0x10,
NVME_FEAT_NOPSC = 0x11,
NVME_FEAT_RRL = 0x12,
NVME_FEAT_PLM_CONFIG = 0x13,
NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
......