Commit ca2b221d authored by Max Gurtovoy, committed by Keith Busch

nvmet: introduce new max queue size configuration entry

Using this port configuration, one will be able to set the maximal queue
size to be used for any controller that will be associated with the
configured port.

The default value stays 1024, but each transport will be able to set its
own value before enabling the port.

Introduce a lower limit of 16 for the minimal queue depth (the same value
we use in the host fabrics drivers).
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Israel Rukshin <israelr@nvidia.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Guixin Liu <kanie@linux.alibaba.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent ad178ba9
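As the message notes, a transport gets to choose its own value before the port is enabled. The following is a hypothetical sketch (illustrative names only, not part of this commit) of a transport pre-setting the limit from its ->add_port() callback, which runs before the generic clamping added below in nvmet_enable_port():

/* Made-up, transport-specific ceiling, purely for illustration. */
#define EXAMPLE_MAX_QUEUE_SIZE	128

static int example_add_port(struct nvmet_port *port)
{
	/* Respect an explicit user setting; only fill in the default. */
	if (port->max_queue_size < 0)
		port->max_queue_size = EXAMPLE_MAX_QUEUE_SIZE;
	return 0;
}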
@@ -273,6 +273,32 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
 
+static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
+}
+
+static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+	int ret;
+
+	if (nvmet_is_port_enabled(port, __func__))
+		return -EACCES;
+	ret = kstrtoint(page, 0, &port->max_queue_size);
+	if (ret) {
+		pr_err("Invalid value '%s' for max_queue_size\n", page);
+		return -EINVAL;
+	}
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
 		char *page)
@@ -1859,6 +1885,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
 	&nvmet_attr_addr_trtype,
 	&nvmet_attr_addr_tsas,
 	&nvmet_attr_param_inline_data_size,
+	&nvmet_attr_param_max_queue_size,
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	&nvmet_attr_param_pi_enable,
 #endif
@@ -1917,6 +1944,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
 	INIT_LIST_HEAD(&port->subsystems);
 	INIT_LIST_HEAD(&port->referrals);
 	port->inline_data_size = -1;	/* < 0 == let the transport choose */
+	port->max_queue_size = -1;	/* < 0 == let the transport choose */
 	port->disc_addr.portid = cpu_to_le16(portid);
 	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
...
@@ -358,6 +358,18 @@ int nvmet_enable_port(struct nvmet_port *port)
 	if (port->inline_data_size < 0)
 		port->inline_data_size = 0;
 
+	/*
+	 * If the transport didn't set the max_queue_size properly, then clamp
+	 * it to the target limits. Also set default values in case the
+	 * transport didn't set it at all.
+	 */
+	if (port->max_queue_size < 0)
+		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
+	else
+		port->max_queue_size = clamp_t(int, port->max_queue_size,
+					       NVMET_MIN_QUEUE_SIZE,
+					       NVMET_MAX_QUEUE_SIZE);
+
 	port->enabled = true;
 	port->tr_ops = ops;
 	return 0;
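The net effect of this clamp can be shown standalone. A small sketch (plain C, with the kernel's clamp_t() reduced to a helper macro for the purpose of the example): negative means "unset" and falls back to the 1024 default, anything else is bounded to [16, 1024].

#include <stdio.h>

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024

/* Simplified stand-in for the kernel's clamp_t(). */
#define clamp_int(v, lo, hi)	((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

static int effective_queue_size(int configured)
{
	if (configured < 0)	/* unset: use the default */
		return NVMET_MAX_QUEUE_SIZE;
	return clamp_int(configured, NVMET_MIN_QUEUE_SIZE, NVMET_MAX_QUEUE_SIZE);
}

int main(void)
{
	const int inputs[] = { -1, 4, 256, 4096 };

	for (int i = 0; i < 4; i++)
		printf("%d -> %d\n", inputs[i], effective_queue_size(inputs[i]));
	return 0;	/* -1 -> 1024, 4 -> 16, 256 -> 256, 4096 -> 1024 */
}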
@@ -1223,9 +1235,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
 	ctrl->cap |= (15ULL << 24);
 	/* maximum queue entries supported: */
 	if (ctrl->ops->get_max_queue_size)
-		ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+		ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
+				   ctrl->port->max_queue_size) - 1;
 	else
-		ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+		ctrl->cap |= ctrl->port->max_queue_size - 1;
 
 	if (nvmet_is_passthru_subsys(ctrl->subsys))
 		nvmet_passthrough_override_cap(ctrl);
...
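The "- 1" in both branches reflects that CAP.MQES is a zero's-based field: bits 15:0 of the CAP property hold the maximum queue entries minus one, and NVMET_MAX_CMD() (see nvmet.h below) adds the one back. A self-contained round-trip check, with NVME_CAP_MQES reproduced so the example compiles on its own:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel macro: CAP.MQES occupies bits 15:0. */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)

int main(void)
{
	const int max_queue_size = 1024;	/* port->max_queue_size */
	uint64_t cap = 0;

	cap |= (uint64_t)(max_queue_size - 1);	/* encode, as nvmet_init_cap() does */
	printf("MQES=%llu -> queue size %llu\n",
	       (unsigned long long)NVME_CAP_MQES(cap),
	       (unsigned long long)(NVME_CAP_MQES(cap) + 1));	/* decode */
	return 0;
}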
@@ -163,6 +163,7 @@ struct nvmet_port {
 	void				*priv;
 	bool				enabled;
 	int				inline_data_size;
+	int				max_queue_size;
 	const struct nvmet_fabrics_ops	*tr_ops;
 	bool				pi_enable;
 };
@@ -543,7 +544,8 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page);
 
-#define NVMET_QUEUE_SIZE	1024
+#define NVMET_MIN_QUEUE_SIZE	16
+#define NVMET_MAX_QUEUE_SIZE	1024
 #define NVMET_NR_QUEUES		128
 #define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES(ctrl->cap) + 1)
...