Commit 0f7223a3 authored by Jens Axboe

Merge tag 'nvme-6.9-2024-03-07' of git://git.infradead.org/nvme into for-6.9/block

Pull NVMe updates from Keith:

"nvme updates for Linux 6.9

 - RDMA target enhancements (Max)
 - Fabrics fixes (Max, Guixin, Hannes)
 - Atomic queue_limits usage (Christoph)
 - Const use for class_register (Ricardo)
 - Identification error handling fixes (Shin'ichiro, Keith)"

* tag 'nvme-6.9-2024-03-07' of git://git.infradead.org/nvme: (31 commits)
  nvme: clear caller pointer on identify failure
  nvme: host: fix double-free of struct nvme_id_ns in ns_update_nuse()
  nvme: fcloop: make fcloop_class constant
  nvme: fabrics: make nvmf_class constant
  nvme: core: constify struct class usage
  nvme-fabrics: typo in nvmf_parse_key()
  nvme-multipath: use atomic queue limits API for stacking limits
  nvme-multipath: pass queue_limits to blk_alloc_disk
  nvme: use the atomic queue limits update API
  nvme: cleanup nvme_configure_metadata
  nvme: don't query identify data in configure_metadata
  nvme: split out a nvme_identify_ns_nvm helper
  nvme: move common logic into nvme_update_ns_info
  nvme: move setting the write cache flags out of nvme_set_queue_limits
  nvme: move a few things out of nvme_update_disk_info
  nvme: don't use nvme_update_disk_info for the multipath disk
  nvme: move blk_integrity_unregister into nvme_init_integrity
  nvme: cleanup the nvme_init_integrity calling conventions
  nvme: move max_integrity_segments handling out of nvme_init_integrity
  nvme: remove nvme_revalidate_zones
  ...
Parents: d37977f0 7e80eb79
drivers/nvme/host/fabrics.c:

@@ -637,7 +637,7 @@ static struct key *nvmf_parse_key(int key_id)
 	}
 
 	key = key_lookup(key_id);
-	if (!IS_ERR(key))
+	if (IS_ERR(key))
 		pr_err("key id %08x not found\n", key_id);
 	else
 		pr_debug("Using key id %08x\n", key_id);
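
The fix above inverts a botched error check: the old code printed "not found" precisely when key_lookup() had succeeded. The convention in play is the kernel's ERR_PTR encoding, in which the top 4095 pointer values carry a negative errno, so one return value can be either a valid pointer or an error. A minimal userspace model of that convention (a simplified sketch of the idea, not the kernel's actual headers; the key ids are made up):

#include <stdio.h>

#define MAX_ERRNO	4095

/* Errors ride in the last page of the address space, as in the kernel. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int value = 42;
	void *good = &value;		/* an ordinary pointer */
	void *bad = ERR_PTR(-2);	/* -ENOENT */

	/* The corrected check: complain only when the lookup failed. */
	if (IS_ERR(bad))
		printf("key id %08x not found (err %ld)\n", 0x1u, PTR_ERR(bad));
	if (!IS_ERR(good))
		printf("Using key id %08x\n", 0x2u);
	return 0;
}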
@@ -1318,7 +1318,10 @@ nvmf_create_ctrl(struct device *dev, const char *buf)
 		return ERR_PTR(ret);
 }
 
-static struct class *nvmf_class;
+static const struct class nvmf_class = {
+	.name = "nvme-fabrics",
+};
+
 static struct device *nvmf_device;
 static DEFINE_MUTEX(nvmf_dev_mutex);
@@ -1438,15 +1441,14 @@ static int __init nvmf_init(void)
 	if (!nvmf_default_host)
 		return -ENOMEM;
 
-	nvmf_class = class_create("nvme-fabrics");
-	if (IS_ERR(nvmf_class)) {
+	ret = class_register(&nvmf_class);
+	if (ret) {
 		pr_err("couldn't register class nvme-fabrics\n");
-		ret = PTR_ERR(nvmf_class);
 		goto out_free_host;
 	}
 
 	nvmf_device =
-		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
+		device_create(&nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
 	if (IS_ERR(nvmf_device)) {
 		pr_err("couldn't create nvme-fabrics device!\n");
 		ret = PTR_ERR(nvmf_device);
@@ -1462,9 +1464,9 @@ static int __init nvmf_init(void)
 	return 0;
 
 out_destroy_device:
-	device_destroy(nvmf_class, MKDEV(0, 0));
+	device_destroy(&nvmf_class, MKDEV(0, 0));
 out_destroy_class:
-	class_destroy(nvmf_class);
+	class_unregister(&nvmf_class);
 out_free_host:
 	nvmf_host_put(nvmf_default_host);
 	return ret;
@@ -1473,8 +1475,8 @@ static int __init nvmf_init(void)
 static void __exit nvmf_exit(void)
 {
 	misc_deregister(&nvmf_misc);
-	device_destroy(nvmf_class, MKDEV(0, 0));
-	class_destroy(nvmf_class);
+	device_destroy(&nvmf_class, MKDEV(0, 0));
+	class_unregister(&nvmf_class);
 	nvmf_host_put(nvmf_default_host);
 
 	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
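
These fabrics.c hunks (and the matching fcloop conversion later in the diff) follow the tree-wide move away from class_create(): the class object is now a statically defined const struct class, class_register() returns 0 or a negative errno rather than an ERR_PTR, and teardown pairs with class_unregister() instead of class_destroy(). A minimal module-shaped sketch of the new pattern (illustrative only, assuming a kernel build tree; the "example" names are hypothetical):

#include <linux/module.h>
#include <linux/device.h>

/* The class lives in rodata now; no heap allocation, no ERR_PTR dance. */
static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	/* class_register() reports failure as a plain -errno */
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);	/* pairs with class_register() */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");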
drivers/nvme/host/multipath.c:

@@ -516,6 +516,7 @@ static void nvme_requeue_work(struct work_struct *work)
 
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 {
+	struct queue_limits lim;
 	bool vwc = false;
 
 	mutex_init(&head->lock);
@@ -532,7 +533,12 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	    !nvme_is_unique_nsid(ctrl, head) || !multipath)
 		return 0;
 
-	head->disk = blk_alloc_disk(NULL, ctrl->numa_node);
+	blk_set_stacking_limits(&lim);
+	lim.dma_alignment = 3;
+	if (head->ids.csi != NVME_CSI_ZNS)
+		lim.max_zone_append_sectors = 0;
+
+	head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
 	if (IS_ERR(head->disk))
 		return PTR_ERR(head->disk);
 	head->disk->fops = &nvme_ns_head_ops;
@@ -553,11 +559,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	    ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
 		blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
 
-	/* set to a default value of 512 until the disk is validated */
-	blk_queue_logical_block_size(head->disk->queue, 512);
-	blk_set_stacking_limits(&head->disk->queue->limits);
-	blk_queue_dma_alignment(head->disk->queue, 3);
-
 	/* we need to propagate up the VMC settings */
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
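
This is part of the series converting nvme to the block layer's atomic queue_limits API: rather than poking limits one at a time into a live queue (blk_queue_logical_block_size() and friends, as in the removed lines), the multipath code now fills a struct queue_limits up front and hands it to blk_alloc_disk(), so the disk is born with one consistent set of limits. For a queue that already exists, the same 6.9 API offers a start/commit pair; a hedged sketch of that pattern (not the exact nvme code, and the function name is hypothetical):

/* kernel-side sketch: update limits of a live queue atomically */
static int example_set_block_size(struct request_queue *q,
				  unsigned int new_lbs)
{
	struct queue_limits lim;

	/* takes q->limits_lock and returns a snapshot of current limits */
	lim = queue_limits_start_update(q);
	lim.logical_block_size = new_lbs;
	lim.physical_block_size = new_lbs;
	/* validates the whole set, publishes it, drops the lock */
	return queue_limits_commit_update(q, &lim);
}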
drivers/nvme/host/nvme.h:

@@ -1036,11 +1036,11 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
 }
 #endif /* CONFIG_NVME_MULTIPATH */
 
-int nvme_revalidate_zones(struct nvme_ns *ns);
 int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
 		unsigned int nr_zones, report_zones_cb cb, void *data);
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
+		struct queue_limits *lim);
 #ifdef CONFIG_BLK_DEV_ZONED
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
 blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
 				       struct nvme_command *cmnd,
 				       enum nvme_zone_mgmt_action action);
@@ -1051,13 +1051,6 @@ static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
 {
 	return BLK_STS_NOTSUPP;
 }
-
-static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
-{
-	dev_warn(ns->ctrl->device,
-		"Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
-	return -EPROTONOSUPPORT;
-}
 #endif
 
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
drivers/nvme/host/rdma.c:

@@ -1006,6 +1006,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 {
 	int ret;
 	bool changed;
+	u16 max_queue_size;
 
 	ret = nvme_rdma_configure_admin_queue(ctrl, new);
 	if (ret)
@@ -1030,11 +1031,16 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 			ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
 	}
 
-	if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+	if (ctrl->ctrl.max_integrity_segments)
+		max_queue_size = NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
+	else
+		max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
+
+	if (ctrl->ctrl.sqsize + 1 > max_queue_size) {
 		dev_warn(ctrl->ctrl.device,
-			"ctrl sqsize %u > max queue size %u, clamping down\n",
-			ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
-		ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+			"ctrl sqsize %u > max queue size %u, clamping down\n",
+			ctrl->ctrl.sqsize + 1, max_queue_size);
+		ctrl->ctrl.sqsize = max_queue_size - 1;
 	}
 
 	if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
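
Note that sqsize is a 0's-based field (a value of N means N + 1 entries), which is why every comparison and the clamp work on sqsize + 1. A small runnable model of the new clamping logic (the values are made up; the NVME_RDMA_* constants are copied from the nvme-rdma.h hunk near the end of this diff):

#include <stdio.h>

#define NVME_RDMA_MAX_QUEUE_SIZE		256
#define NVME_RDMA_MAX_METADATA_QUEUE_SIZE	128

int main(void)
{
	unsigned int sqsize = 511;	/* 0's-based: wants 512 entries */
	int has_metadata = 1;		/* e.g. end-to-end PI enabled */
	unsigned int max_queue_size = has_metadata ?
		NVME_RDMA_MAX_METADATA_QUEUE_SIZE : NVME_RDMA_MAX_QUEUE_SIZE;

	if (sqsize + 1 > max_queue_size) {
		printf("ctrl sqsize %u > max queue size %u, clamping down\n",
		       sqsize + 1, max_queue_size);
		sqsize = max_queue_size - 1;	/* back to 0's-based */
	}
	printf("effective queue size: %u entries\n", sqsize + 1);
	return 0;
}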
drivers/nvme/host/sysfs.c:

@@ -221,14 +221,11 @@ static int ns_update_nuse(struct nvme_ns *ns)
 
 	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
 	if (ret)
-		goto out_free_id;
+		return ret;
 
 	ns->head->nuse = le64_to_cpu(id->nuse);
-
-out_free_id:
 	kfree(id);
-
-	return ret;
+	return 0;
 }
 
 static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
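
The double-free being fixed here: nvme_identify_ns() already frees the ID buffer when the Identify command fails (and, after the companion fix "nvme: clear caller pointer on identify failure", also NULLs the caller's pointer), so the old out_free_id label called kfree() a second time on that buffer. A minimal userspace model of the bug shape and the defensive contract (the identify() helper is hypothetical):

#include <stdio.h>
#include <stdlib.h>

/*
 * Model of a helper that allocates into *buf but cleans up after itself
 * on failure, clearing the caller's pointer so stray frees are harmless.
 */
static int identify(int fail, char **buf)
{
	*buf = malloc(64);
	if (!*buf)
		return -1;
	if (fail) {
		free(*buf);
		*buf = NULL;	/* the "clear caller pointer" part */
		return -1;
	}
	return 0;
}

int main(void)
{
	char *id;

	if (identify(1, &id))
		return 1;	/* early return, as in the fix: no second free */

	/* success path: consume the buffer, then free it exactly once */
	free(id);
	return 0;
}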
drivers/nvme/host/zns.c:

@@ -7,16 +7,6 @@
 #include <linux/vmalloc.h>
 #include "nvme.h"
 
-int nvme_revalidate_zones(struct nvme_ns *ns)
-{
-	struct request_queue *q = ns->queue;
-
-	blk_queue_chunk_sectors(q, ns->head->zsze);
-	blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
-
-	return blk_revalidate_disk_zones(ns->disk, NULL);
-}
-
 static int nvme_set_max_append(struct nvme_ctrl *ctrl)
 {
 	struct nvme_command c = { };
@@ -45,10 +35,10 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
 	return 0;
 }
 
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
+		struct queue_limits *lim)
 {
 	struct nvme_effects_log *log = ns->head->effects;
-	struct request_queue *q = ns->queue;
 	struct nvme_command c = { };
 	struct nvme_id_ns_zns *id;
 	int status;
@@ -109,10 +99,12 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 		goto free_data;
 	}
 
-	disk_set_zoned(ns->disk);
-	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
-	disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
-	disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
+	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+	lim->zoned = 1;
+	lim->max_open_zones = le32_to_cpu(id->mor) + 1;
+	lim->max_active_zones = le32_to_cpu(id->mar) + 1;
+	lim->chunk_sectors = ns->head->zsze;
+	lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
 
 free_data:
 	kfree(id);
 	return status;
drivers/nvme/target/admin-cmd.c:

@@ -428,7 +428,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->cqes = (0x4 << 4) | 0x4;
 
 	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
-	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
 
 	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
 	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
drivers/nvme/target/configfs.c:

@@ -273,6 +273,32 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
 
+static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
+}
+
+static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+	int ret;
+
+	if (nvmet_is_port_enabled(port, __func__))
+		return -EACCES;
+	ret = kstrtoint(page, 0, &port->max_queue_size);
+	if (ret) {
+		pr_err("Invalid value '%s' for max_queue_size\n", page);
+		return -EINVAL;
+	}
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
 		char *page)
@@ -1859,6 +1885,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
 	&nvmet_attr_addr_trtype,
 	&nvmet_attr_addr_tsas,
 	&nvmet_attr_param_inline_data_size,
+	&nvmet_attr_param_max_queue_size,
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	&nvmet_attr_param_pi_enable,
 #endif
@@ -1917,6 +1944,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
 	INIT_LIST_HEAD(&port->subsystems);
 	INIT_LIST_HEAD(&port->referrals);
 	port->inline_data_size = -1; /* < 0 == let the transport choose */
+	port->max_queue_size = -1;   /* < 0 == let the transport choose */
 	port->disc_addr.portid = cpu_to_le16(portid);
 	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
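
The new attribute appears as param_max_queue_size in the port's configfs directory and, per the store handler above, rejects writes while the port is enabled (-EACCES) and parses with kstrtoint() base 0, so decimal, octal, and 0x-prefixed values all work; the target later clamps the value to [16, 1024] when the port is enabled (see the core.c hunk below). A small userspace sketch of setting it (the port path is an example for a port named "1"; adjust to your configuration):

#include <stdio.h>

int main(void)
{
	/* example nvmet port; must already exist and be disabled */
	const char *attr =
		"/sys/kernel/config/nvmet/ports/1/param_max_queue_size";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "512\n");	/* kstrtoint tolerates the trailing newline */
	return fclose(f) ? 1 : 0;
}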
drivers/nvme/target/core.c:

@@ -358,6 +358,18 @@ int nvmet_enable_port(struct nvmet_port *port)
 	if (port->inline_data_size < 0)
 		port->inline_data_size = 0;
 
+	/*
+	 * If the transport didn't set the max_queue_size properly, then clamp
+	 * it to the target limits. Also set default values in case the
+	 * transport didn't set it at all.
+	 */
+	if (port->max_queue_size < 0)
+		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
+	else
+		port->max_queue_size = clamp_t(int, port->max_queue_size,
+					       NVMET_MIN_QUEUE_SIZE,
+					       NVMET_MAX_QUEUE_SIZE);
+
 	port->enabled = true;
 	port->tr_ops = ops;
 	return 0;
@@ -1223,9 +1235,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
 	ctrl->cap |= (15ULL << 24);
 	/* maximum queue entries supported: */
 	if (ctrl->ops->get_max_queue_size)
-		ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+		ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
+				   ctrl->port->max_queue_size) - 1;
 	else
-		ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+		ctrl->cap |= ctrl->port->max_queue_size - 1;
 
 	if (nvmet_is_passthru_subsys(ctrl->subsys))
 		nvmet_passthrough_override_cap(ctrl);
@@ -1411,6 +1424,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 
 	kref_init(&ctrl->ref);
 	ctrl->subsys = subsys;
+	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
 	nvmet_init_cap(ctrl);
 	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
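
CAP.MQES lives in bits 15:0 of the controller's CAP register and is 0's-based, so the target now advertises min(transport max, port max) - 1, and the reworked NVMET_MAX_CMD(ctrl) in the nvmet.h hunk below recovers a 1's-based command count as MQES + 1. A small runnable model of that arithmetic (queue-size constants from the nvmet.h hunk; NVME_CAP_MQES() follows the kernel's definition; clamp_t() modeled by hand):

#include <stdio.h>
#include <stdint.h>

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int max_queue_size = 4096;	/* an over-sized port setting */
	uint64_t cap = 0;

	/* nvmet_enable_port(): pull the setting into the target's limits */
	max_queue_size = clamp_int(max_queue_size, NVMET_MIN_QUEUE_SIZE,
				   NVMET_MAX_QUEUE_SIZE);

	/* nvmet_init_cap(): MQES is 0's-based */
	cap |= (uint64_t)max_queue_size - 1;

	/* NVMET_MAX_CMD(ctrl): back to a 1's-based command count */
	printf("MQES=%llu maxcmd=%llu\n",
	       (unsigned long long)NVME_CAP_MQES(cap),
	       (unsigned long long)(NVME_CAP_MQES(cap) + 1));
	return 0;
}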
drivers/nvme/target/discovery.c:

@@ -282,7 +282,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
 	id->lpa = (1 << 2);
 
 	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
-	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
 
 	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
 	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
drivers/nvme/target/fabrics-cmd.c:

@@ -157,7 +157,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
 	}
 
-	if (sqsize > mqes) {
+	/* for fabrics, this value applies to only the I/O Submission Queues */
+	if (qid && sqsize > mqes) {
 		pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
 				sqsize, mqes, ctrl->cntlid);
 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
@@ -251,8 +252,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	if (status)
 		goto out;
 
-	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
-
 	uuid_copy(&ctrl->hostid, &d->hostid);
 	ret = nvmet_setup_auth(ctrl);
drivers/nvme/target/fcloop.c:

@@ -1556,7 +1556,9 @@ static const struct attribute_group *fcloop_dev_attr_groups[] = {
 	NULL,
 };
 
-static struct class *fcloop_class;
+static const struct class fcloop_class = {
+	.name = "fcloop",
+};
 static struct device *fcloop_device;
 
@@ -1564,15 +1566,14 @@ static int __init fcloop_init(void)
 {
 	int ret;
 
-	fcloop_class = class_create("fcloop");
-	if (IS_ERR(fcloop_class)) {
+	ret = class_register(&fcloop_class);
+	if (ret) {
 		pr_err("couldn't register class fcloop\n");
-		ret = PTR_ERR(fcloop_class);
 		return ret;
 	}
 
 	fcloop_device = device_create_with_groups(
-				fcloop_class, NULL, MKDEV(0, 0), NULL,
+				&fcloop_class, NULL, MKDEV(0, 0), NULL,
 				fcloop_dev_attr_groups, "ctl");
 	if (IS_ERR(fcloop_device)) {
 		pr_err("couldn't create ctl device!\n");
@@ -1585,7 +1586,7 @@ static int __init fcloop_init(void)
 	return 0;
 
 out_destroy_class:
-	class_destroy(fcloop_class);
+	class_unregister(&fcloop_class);
 	return ret;
 }
@@ -1643,8 +1644,8 @@ static void __exit fcloop_exit(void)
 	put_device(fcloop_device);
-	device_destroy(fcloop_class, MKDEV(0, 0));
-	class_destroy(fcloop_class);
+	device_destroy(&fcloop_class, MKDEV(0, 0));
+	class_unregister(&fcloop_class);
 }
 
 module_init(fcloop_init);
drivers/nvme/target/nvmet.h:

@@ -163,6 +163,7 @@ struct nvmet_port {
 	void				*priv;
 	bool				enabled;
 	int				inline_data_size;
+	int				max_queue_size;
 	const struct nvmet_fabrics_ops	*tr_ops;
 	bool				pi_enable;
 };
@@ -543,9 +544,10 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page);
 
-#define NVMET_QUEUE_SIZE	1024
+#define NVMET_MIN_QUEUE_SIZE	16
+#define NVMET_MAX_QUEUE_SIZE	1024
 #define NVMET_NR_QUEUES		128
-#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE
+#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES(ctrl->cap) + 1)
 
 /*
  * Nice round number that makes a list of nsids fit into a page.
drivers/nvme/target/passthru.c:

@@ -132,7 +132,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
 	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
 
-	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
 
 	/* don't support fuse commands */
 	id->fuses = 0;
drivers/nvme/target/rdma.c:

@@ -1956,6 +1956,14 @@ static int nvmet_rdma_add_port(struct nvmet_port *nport)
 		nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
 	}
 
+	if (nport->max_queue_size < 0) {
+		nport->max_queue_size = NVME_RDMA_DEFAULT_QUEUE_SIZE;
+	} else if (nport->max_queue_size > NVME_RDMA_MAX_QUEUE_SIZE) {
+		pr_warn("max_queue_size %u is too large, reducing to %u\n",
+			nport->max_queue_size, NVME_RDMA_MAX_QUEUE_SIZE);
+		nport->max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
+	}
+
 	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
 			nport->disc_addr.trsvcid, &port->addr);
 	if (ret) {
@@ -2015,6 +2023,8 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
 
 static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
 {
+	if (ctrl->pi_support)
+		return NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
 	return NVME_RDMA_MAX_QUEUE_SIZE;
 }
include/linux/nvme-rdma.h:

@@ -6,7 +6,11 @@
 #ifndef _LINUX_NVME_RDMA_H
 #define _LINUX_NVME_RDMA_H
 
-#define NVME_RDMA_MAX_QUEUE_SIZE	128
+#define NVME_RDMA_IP_PORT		4420
+
+#define NVME_RDMA_MAX_QUEUE_SIZE	256
+#define NVME_RDMA_MAX_METADATA_QUEUE_SIZE	128
+#define NVME_RDMA_DEFAULT_QUEUE_SIZE	128
 
 enum nvme_rdma_cm_fmt {
 	NVME_RDMA_CM_FMT_1_0 = 0x0,
include/linux/nvme.h:

@@ -23,8 +23,6 @@
 #define NVME_DISC_SUBSYS_NAME	"nqn.2014-08.org.nvmexpress.discovery"
 
-#define NVME_RDMA_IP_PORT	4420
-
 #define NVME_NSID_ALL		0xffffffff
 
 enum nvme_subsys_type {