Commit a2b658e4 authored by Jens Axboe

Merge tag 'nvme-5.12-2021-03-05' of git://git.infradead.org/nvme into block-5.12

Pull NVMe fixes from Christoph:

"nvme fixes for 5.12:

 - more device quirks (Julian Einwag, Zoltán Böszörményi, Pascal Terjan)
 - fix a hwmon error return (Daniel Wagner)
 - fix the keep alive timeout initialization (Martin George)
 - ensure the model_number can't be changed on a used subsystem
   (Max Gurtovoy)"

* tag 'nvme-5.12-2021-03-05' of git://git.infradead.org/nvme:
  nvmet: model_number must be immutable once set
  nvme-fabrics: fix kato initialization
  nvme-hwmon: Return error code when registration fails
  nvme-pci: add quirks for Lexar 256GB SSD
  nvme-pci: mark Kingston SKC2000 as not supporting the deepest power state
  nvme-pci: mark Seagate Nytro XM1440 as QUIRK_NO_NS_DESC_LIST.
parents 77516d25 d9f273b7
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -630,7 +630,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
 	opts->nr_io_queues = num_online_cpus();
 	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
-	opts->kato = NVME_DEFAULT_KATO;
+	opts->kato = 0;
 	opts->duplicate_connect = false;
 	opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
 	opts->hdr_digest = false;
@@ -893,6 +893,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 		opts->nr_write_queues = 0;
 		opts->nr_poll_queues = 0;
 		opts->duplicate_connect = true;
+	} else {
+		if (!opts->kato)
+			opts->kato = NVME_DEFAULT_KATO;
 	}
 	if (ctrl_loss_tmo < 0) {
 		opts->max_reconnects = -1;
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -248,6 +248,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 	if (IS_ERR(hwmon)) {
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
 		kfree(data);
+		return PTR_ERR(hwmon);
 	}
 	ctrl->hwmon_device = hwmon;
 	return 0;
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3234,7 +3234,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
 	{ PCI_DEVICE(0x1bb1, 0x0100),	/* Seagate Nytro Flash Storage */
-		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+				NVME_QUIRK_NO_NS_DESC_LIST, },
 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
@@ -3248,6 +3249,9 @@ static const struct pci_device_id nvme_id_table[] = {
 		NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	{ PCI_DEVICE(0x1b4b, 0x1092),	/* Lexar 256 GB SSD */
+		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LighNVM qemu device */
 		.driver_data = NVME_QUIRK_LIGHTNVM, },
 	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
@@ -3265,6 +3269,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x1d97, 0x2263),	/* SPCC */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x2646, 0x2262),	/* KINGSTON SKC2000 NVMe SSD */
+		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(0x2646, 0x2263),	/* KINGSTON A2000 NVMe SSD */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -313,27 +313,40 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
 	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
 }
 
-static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
-				      struct nvmet_subsys *subsys)
+static u16 nvmet_set_model_number(struct nvmet_subsys *subsys)
 {
-	const char *model = NVMET_DEFAULT_CTRL_MODEL;
-	struct nvmet_subsys_model *subsys_model;
+	u16 status = 0;
 
-	rcu_read_lock();
-	subsys_model = rcu_dereference(subsys->model);
-	if (subsys_model)
-		model = subsys_model->number;
-	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
-	rcu_read_unlock();
+	mutex_lock(&subsys->lock);
+	if (!subsys->model_number) {
+		subsys->model_number =
+			kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
+		if (!subsys->model_number)
+			status = NVME_SC_INTERNAL;
+	}
+	mutex_unlock(&subsys->lock);
+
+	return status;
 }
 
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_subsys *subsys = ctrl->subsys;
 	struct nvme_id_ctrl *id;
 	u32 cmd_capsule_size;
 	u16 status = 0;
 
+	/*
+	 * If there is no model number yet, set it now. It will then remain
+	 * stable for the life time of the subsystem.
+	 */
+	if (!subsys->model_number) {
+		status = nvmet_set_model_number(subsys);
+		if (status)
+			goto out;
+	}
+
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
 	if (!id) {
 		status = NVME_SC_INTERNAL;
@@ -347,7 +360,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	memset(id->sn, ' ', sizeof(id->sn));
 	bin2hex(id->sn, &ctrl->subsys->serial,
 		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
-	nvmet_id_set_model_number(id, ctrl->subsys);
+	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
+		       strlen(subsys->model_number), ' ');
 	memcpy_and_pad(id->fr, sizeof(id->fr),
 		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');
 
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1118,16 +1118,12 @@ static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
 					     char *page)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
-	struct nvmet_subsys_model *subsys_model;
-	char *model = NVMET_DEFAULT_CTRL_MODEL;
 	int ret;
 
-	rcu_read_lock();
-	subsys_model = rcu_dereference(subsys->model);
-	if (subsys_model)
-		model = subsys_model->number;
-	ret = snprintf(page, PAGE_SIZE, "%s\n", model);
-	rcu_read_unlock();
+	mutex_lock(&subsys->lock);
+	ret = snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number ?
+			subsys->model_number : NVMET_DEFAULT_CTRL_MODEL);
+	mutex_unlock(&subsys->lock);
 
 	return ret;
 }
@@ -1138,14 +1134,17 @@ static bool nvmet_is_ascii(const char c)
 	return c >= 0x20 && c <= 0x7e;
 }
 
-static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
-					     const char *page, size_t count)
+static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
+		const char *page, size_t count)
 {
-	struct nvmet_subsys *subsys = to_subsys(item);
-	struct nvmet_subsys_model *new_model;
-	char *new_model_number;
 	int pos = 0, len;
 
+	if (subsys->model_number) {
+		pr_err("Can't set model number. %s is already assigned\n",
+		       subsys->model_number);
+		return -EINVAL;
+	}
+
 	len = strcspn(page, "\n");
 	if (!len)
 		return -EINVAL;
@@ -1155,28 +1154,25 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
 		return -EINVAL;
 	}
 
-	new_model_number = kmemdup_nul(page, len, GFP_KERNEL);
-	if (!new_model_number)
+	subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL);
+	if (!subsys->model_number)
 		return -ENOMEM;
+	return count;
+}
 
-	new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL);
-	if (!new_model) {
-		kfree(new_model_number);
-		return -ENOMEM;
-	}
-	memcpy(new_model->number, new_model_number, len);
+static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
+					     const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	ssize_t ret;
 
 	down_write(&nvmet_config_sem);
 	mutex_lock(&subsys->lock);
-	new_model = rcu_replace_pointer(subsys->model, new_model,
-					mutex_is_locked(&subsys->lock));
+	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
 	mutex_unlock(&subsys->lock);
 	up_write(&nvmet_config_sem);
 
-	kfree_rcu(new_model, rcuhead);
-	kfree(new_model_number);
-
-	return count;
+	return ret;
 }
 
 CONFIGFS_ATTR(nvmet_subsys_, attr_model);
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1532,7 +1532,7 @@ static void nvmet_subsys_free(struct kref *ref)
 	nvmet_passthru_subsys_free(subsys);
 
 	kfree(subsys->subsysnqn);
-	kfree_rcu(subsys->model, rcuhead);
+	kfree(subsys->model_number);
 	kfree(subsys);
 }
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -208,11 +208,6 @@ struct nvmet_ctrl {
 	bool			pi_support;
 };
 
-struct nvmet_subsys_model {
-	struct rcu_head		rcuhead;
-	char			number[];
-};
-
 struct nvmet_subsys {
 	enum nvme_subsys_type	type;
 
@@ -242,7 +237,7 @@ struct nvmet_subsys {
 	struct config_group	namespaces_group;
 	struct config_group	allowed_hosts_group;
 
-	struct nvmet_subsys_model	__rcu *model;
+	char			*model_number;
 
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
 	struct nvme_ctrl	*passthru_ctrl;