Commit 55eb942e authored by Chaitanya Kulkarni, committed by Christoph Hellwig

nvmet: add buffered I/O support for file backed ns

Add a new "buffered_io" attribute, which disabled direct I/O and thus
enables page cache based caching when enabled.   The attribute can only
be changed when the namespace is disabled as the file has to be reopend
for the change to take effect.

Possibly blocking reads and writes are deferred to a newly introduced
global workqueue.
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 0866bf0c
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -407,11 +407,40 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_ns_, enable);
 
+static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
+}
+
+static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	bool val;
+
+	if (strtobool(page, &val))
+		return -EINVAL;
+
+	mutex_lock(&ns->subsys->lock);
+	if (ns->enabled) {
+		pr_err("disable ns before setting buffered_io value.\n");
+		mutex_unlock(&ns->subsys->lock);
+		return -EINVAL;
+	}
+
+	ns->buffered_io = val;
+	mutex_unlock(&ns->subsys->lock);
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+
 static struct configfs_attribute *nvmet_ns_attrs[] = {
 	&nvmet_ns_attr_device_path,
 	&nvmet_ns_attr_device_nguid,
 	&nvmet_ns_attr_device_uuid,
 	&nvmet_ns_attr_enable,
+	&nvmet_ns_attr_buffered_io,
 	NULL,
 };
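For reference, the new attribute is driven from user space through the nvmet
configfs tree. A minimal sketch of the intended flow (disable the namespace,
flip buffered_io, re-enable); the subsystem NQN "testnqn" and namespace ID 1
are placeholders, not part of this patch:

/* Minimal user-space sketch: toggle buffered_io for a disabled namespace.
 * The configfs paths assume a subsystem "testnqn" with namespace 1; both
 * names are placeholders for whatever the target actually uses. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define NS_DIR "/sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/"

static int write_attr(const char *attr, const char *val)
{
	char path[256];
	int fd, ret;

	snprintf(path, sizeof(path), NS_DIR "%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, val, strlen(val)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}

int main(void)
{
	/* buffered_io can only change while the namespace is disabled;
	 * the store handler returns -EINVAL otherwise. */
	write_attr("enable", "0");
	if (write_attr("buffered_io", "1"))
		perror("buffered_io");	/* fails if the ns is still enabled */
	write_attr("enable", "1");
	return 0;
}

The store handler above rejects the write while the namespace is enabled,
which is why the sketch disables it first.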
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -18,6 +18,7 @@
 #include "nvmet.h"
 
+struct workqueue_struct *buffered_io_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
@@ -437,6 +438,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 	ns->nsid = nsid;
 	ns->subsys = subsys;
 	uuid_gen(&ns->uuid);
+	ns->buffered_io = false;
 
 	return ns;
 }
@@ -1109,6 +1111,12 @@ static int __init nvmet_init(void)
 {
 	int error;
 
+	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
+			WQ_MEM_RECLAIM, 0);
+	if (!buffered_io_wq) {
+		error = -ENOMEM;
+		goto out;
+	}
+
 	error = nvmet_init_discovery();
 	if (error)
 		goto out;
@@ -1129,6 +1137,7 @@ static void __exit nvmet_exit(void)
 	nvmet_exit_configfs();
 	nvmet_exit_discovery();
 	ida_destroy(&cntlid_ida);
+	destroy_workqueue(buffered_io_wq);
 
 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
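The init/exit changes above follow the usual kernel workqueue lifecycle:
allocate once at module load with WQ_MEM_RECLAIM (the queue sits in the I/O
path, so it must be able to make forward progress under memory pressure),
destroy it at unload after all users are gone. A stripped-down sketch of the
same pattern in a standalone module; the module and handler names are
illustrative only, not part of this patch:

/* Illustrative module showing the alloc_workqueue()/destroy_workqueue()
 * lifecycle used by nvmet_init()/nvmet_exit() above; names are made up. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *w)
{
	pr_info("running in process context, may block\n");
}

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread, so queued work
	 * can still run while the system is reclaiming memory. */
	demo_wq = alloc_workqueue("demo-wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* destroy_workqueue() flushes pending work before freeing. */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");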
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -16,6 +16,8 @@
 void nvmet_file_ns_disable(struct nvmet_ns *ns)
 {
 	if (ns->file) {
+		if (ns->buffered_io)
+			flush_workqueue(buffered_io_wq);
 		mempool_destroy(ns->bvec_pool);
 		ns->bvec_pool = NULL;
 		kmem_cache_destroy(ns->bvec_cache);
@@ -27,11 +29,14 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
 
 int nvmet_file_ns_enable(struct nvmet_ns *ns)
 {
-	int ret;
+	int flags = O_RDWR | O_LARGEFILE;
 	struct kstat stat;
+	int ret;
+
+	if (!ns->buffered_io)
+		flags |= O_DIRECT;
 
-	ns->file = filp_open(ns->device_path,
-			O_RDWR | O_LARGEFILE | O_DIRECT, 0);
+	ns->file = filp_open(ns->device_path, flags, 0);
 	if (IS_ERR(ns->file)) {
 		pr_err("failed to open file %s: (%ld)\n",
 				ns->device_path, PTR_ERR(ns->file));
@@ -100,7 +105,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
 
 	iocb->ki_pos = pos;
 	iocb->ki_filp = req->ns->file;
-	iocb->ki_flags = IOCB_DIRECT | ki_flags;
+	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
 
 	ret = call_iter(iocb, &iter);
@@ -189,6 +194,19 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
 		nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
 }
 
+static void nvmet_file_buffered_io_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+	nvmet_file_execute_rw(req);
+}
+
+static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
+{
+	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+	queue_work(buffered_io_wq, &req->f.work);
+}
+
 static void nvmet_file_flush_work(struct work_struct *w)
 {
 	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
@@ -280,6 +298,9 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
 	switch (cmd->common.opcode) {
 	case nvme_cmd_read:
 	case nvme_cmd_write:
-		req->execute = nvmet_file_execute_rw;
+		if (req->ns->buffered_io)
+			req->execute = nvmet_file_execute_rw_buffered_io;
+		else
+			req->execute = nvmet_file_execute_rw;
 		req->data_len = nvmet_rw_len(req);
 		return 0;
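The enable path above simply drops O_DIRECT from the filp_open() flags when
buffered_io is set, so reads and writes go through the page cache;
iocb_flags() then derives IOCB_DIRECT (or not) from the file's open mode
instead of hard-coding it. A user-space sketch of the same distinction,
including the buffer alignment O_DIRECT typically requires; the backing file
path is a placeholder and is assumed to exist on a filesystem that supports
O_DIRECT:

/* User-space illustration of the O_DIRECT vs. buffered split the patch
 * toggles on the target's backing file; "/var/tmp/ns.img" is a placeholder
 * for an existing backing file. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int open_backing_file(const char *path, bool buffered_io)
{
	int flags = O_RDWR | O_LARGEFILE;

	if (!buffered_io)
		flags |= O_DIRECT;	/* bypass the page cache */
	return open(path, flags);
}

int main(void)
{
	int fd = open_backing_file("/var/tmp/ns.img", false);
	void *buf;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* O_DIRECT I/O must normally be aligned to the logical block
	 * size; 4096 is a common safe choice. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	if (pread(fd, buf, 4096, 0) < 0)
		perror("pread");
	free(buf);
	close(fd);
	return 0;
}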
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -65,6 +65,7 @@ struct nvmet_ns {
 	u8			nguid[16];
 	uuid_t			uuid;
 
+	bool			buffered_io;
 	bool			enabled;
 	struct nvmet_subsys	*subsys;
 	const char		*device_path;
@@ -269,6 +270,8 @@ struct nvmet_req {
 	const struct nvmet_fabrics_ops *ops;
 };
 
+extern struct workqueue_struct *buffered_io_wq;
+
 static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
 {
 	req->rsp->status = cpu_to_le16(status << 1);
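The deferral relies on the standard embedded work_struct plus container_of()
pattern: the request embeds a work item (req->f.work), and the handler
recovers the request from it, exactly as nvmet_file_buffered_io_work() does
above. A minimal sketch of that pattern with made-up type and field names:

/* Sketch of the embedded work_struct + container_of() pattern used by
 * nvmet_file_buffered_io_work(); struct and field names are made up. */
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_req {
	int			id;
	struct work_struct	work;	/* embedded, like nvmet_req f.work */
};

static void demo_req_work(struct work_struct *w)
{
	/* Recover the containing request from the embedded work item. */
	struct demo_req *req = container_of(w, struct demo_req, work);

	pr_info("processing request %d in process context\n", req->id);
	kfree(req);
}

static void demo_submit(struct workqueue_struct *wq, int id)
{
	struct demo_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return;
	req->id = id;
	INIT_WORK(&req->work, demo_req_work);
	queue_work(wq, &req->work);
}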