Commit aa36d711 authored by Sagi Grimberg, committed by Christoph Hellwig

nvme-auth: convert dhchap_auth_list to an array

We know exactly how many dhchap contexts we will need, so there is no need
to hold a list that has to be protected with a mutex. Convert it to a
dynamically allocated array, and let each dhchap context maintain its own
access state.
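
As a rough sketch of the resulting layout (condensed from the hunks below rather than the complete driver code, with the key checks in nvme_auth_negotiate() omitted), the per-queue contexts become a flat array indexed by queue id and sized once from the connect options:

/* Condensed sketch, not the complete driver code; see the hunks below. */

/* One dhchap context per I/O, write and poll queue, plus one for the admin queue. */
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
        return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
                        ctrl->opts->nr_poll_queues + 1;
}

/* Negotiation indexes the array directly; no list walk, no mutex. */
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
        struct nvme_dhchap_queue_context *chap = &ctrl->dhchap_ctxs[qid];

        cancel_work_sync(&chap->auth_work);     /* context is reused, never reallocated */
        queue_work(nvme_wq, &chap->auth_work);
        return 0;
}

nvme_auth_wait() indexes the same array and simply flushes the per-queue work, so the -ENXIO "context not found" path disappears.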

Make dhchap_auth_mutex a fine-grained lock that protects only the ctrl
host_key and ctrl_key, so that it is never held for long and does not need
to be taken or released when flushing the authentication work items.
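
A sketch of the new locking discipline, condensed from the hunks below. The helper names set_ctrl_key() and make_ctrl_response() are hypothetical, used only to illustrate the writer (sysfs store) and reader (auth work) sides of the lock:

/* Sketch only: hypothetical helpers showing the fine-grained lock scope. */

/* sysfs writer side: publish a new controller key under the lock */
static void set_ctrl_key(struct nvme_ctrl *ctrl, struct nvme_dhchap_key *key)
{
        struct nvme_dhchap_key *old = ctrl->ctrl_key;

        mutex_lock(&ctrl->dhchap_auth_mutex);
        ctrl->ctrl_key = key;
        mutex_unlock(&ctrl->dhchap_auth_mutex);
        nvme_auth_free_key(old);        /* free the old key outside the lock */
}

/* auth work reader side: hold the lock only while the key is consumed */
static int make_ctrl_response(struct nvme_ctrl *ctrl,
                              struct nvme_dhchap_queue_context *chap)
{
        int ret = 0;

        mutex_lock(&ctrl->dhchap_auth_mutex);
        if (ctrl->ctrl_key)
                ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
        mutex_unlock(&ctrl->dhchap_auth_mutex);
        return ret;
}

Because the lock is never held across a whole authentication work item, flushing or cancelling the per-queue works no longer needs to touch it.
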
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 546dea18
@@ -50,6 +50,12 @@ struct nvme_dhchap_queue_context {
 #define nvme_auth_queue_from_qid(ctrl, qid) \
         (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
 
+static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
+{
+        return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
+                        ctrl->opts->nr_poll_queues + 1;
+}
+
 static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
                 void *data, size_t data_len, bool auth_send)
 {
@@ -510,6 +516,7 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
                 ret = PTR_ERR(ctrl_response);
                 return ret;
         }
+
         ret = crypto_shash_setkey(chap->shash_tfm,
                         ctrl_response, ctrl->ctrl_key->len);
         if (ret) {
@@ -668,7 +675,6 @@ static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
         crypto_free_shash(chap->shash_tfm);
         if (chap->dh_tfm)
                 crypto_free_kpp(chap->dh_tfm);
-        kfree(chap);
 }
 
 static void nvme_queue_auth_work(struct work_struct *work)
@@ -748,11 +754,14 @@ static void nvme_queue_auth_work(struct work_struct *work)
 
         dev_dbg(ctrl->device, "%s: qid %d host response\n",
                 __func__, chap->qid);
+        mutex_lock(&ctrl->dhchap_auth_mutex);
         ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
         if (ret) {
+                mutex_unlock(&ctrl->dhchap_auth_mutex);
                 chap->error = ret;
                 goto fail2;
         }
+        mutex_unlock(&ctrl->dhchap_auth_mutex);
 
         /* DH-HMAC-CHAP Step 3: send reply */
         dev_dbg(ctrl->device, "%s: qid %d send reply\n",
@@ -793,16 +802,19 @@ static void nvme_queue_auth_work(struct work_struct *work)
                 return;
         }
 
+        mutex_lock(&ctrl->dhchap_auth_mutex);
         if (ctrl->ctrl_key) {
                 dev_dbg(ctrl->device,
                         "%s: qid %d controller response\n",
                         __func__, chap->qid);
                 ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
                 if (ret) {
+                        mutex_unlock(&ctrl->dhchap_auth_mutex);
                         chap->error = ret;
                         goto fail2;
                 }
         }
+        mutex_unlock(&ctrl->dhchap_auth_mutex);
 
         ret = nvme_auth_process_dhchap_success1(ctrl, chap);
         if (ret) {
@@ -852,29 +864,8 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
                 return -ENOKEY;
         }
 
-        mutex_lock(&ctrl->dhchap_auth_mutex);
-        /* Check if the context is already queued */
-        list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-                WARN_ON(!chap->buf);
-                if (chap->qid == qid) {
-                        dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
-                        mutex_unlock(&ctrl->dhchap_auth_mutex);
-                        flush_work(&chap->auth_work);
-                        nvme_auth_reset_dhchap(chap);
-                        queue_work(nvme_wq, &chap->auth_work);
-                        return 0;
-                }
-        }
-        chap = kzalloc(sizeof(*chap), GFP_KERNEL);
-        if (!chap) {
-                mutex_unlock(&ctrl->dhchap_auth_mutex);
-                return -ENOMEM;
-        }
-        chap->qid = qid;
-        chap->ctrl = ctrl;
-        INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
-        list_add(&chap->entry, &ctrl->dhchap_auth_list);
-        mutex_unlock(&ctrl->dhchap_auth_mutex);
+        chap = &ctrl->dhchap_ctxs[qid];
+        cancel_work_sync(&chap->auth_work);
         queue_work(nvme_wq, &chap->auth_work);
         return 0;
 }
@@ -885,19 +876,12 @@ int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
         struct nvme_dhchap_queue_context *chap;
         int ret;
 
-        mutex_lock(&ctrl->dhchap_auth_mutex);
-        list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-                if (chap->qid != qid)
-                        continue;
-                mutex_unlock(&ctrl->dhchap_auth_mutex);
-                flush_work(&chap->auth_work);
-                ret = chap->error;
-                /* clear sensitive info */
-                nvme_auth_reset_dhchap(chap);
-                return ret;
-        }
-        mutex_unlock(&ctrl->dhchap_auth_mutex);
-        return -ENXIO;
+        chap = &ctrl->dhchap_ctxs[qid];
+        flush_work(&chap->auth_work);
+        ret = chap->error;
+        /* clear sensitive info */
+        nvme_auth_reset_dhchap(chap);
+        return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_auth_wait);
@@ -946,11 +930,11 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
 
 int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
 {
-        int ret;
+        struct nvme_dhchap_queue_context *chap;
+        int i, ret;
 
-        INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
-        INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
         mutex_init(&ctrl->dhchap_auth_mutex);
+        INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
         if (!ctrl->opts)
                 return 0;
         ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
@@ -959,37 +943,63 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
                 return ret;
         ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
                         &ctrl->ctrl_key);
-        if (ret) {
-                nvme_auth_free_key(ctrl->host_key);
-                ctrl->host_key = NULL;
+        if (ret)
+                goto err_free_dhchap_secret;
+
+        if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
+                return ret;
+
+        ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
+                                sizeof(*chap), GFP_KERNEL);
+        if (!ctrl->dhchap_ctxs) {
+                ret = -ENOMEM;
+                goto err_free_dhchap_ctrl_secret;
         }
+
+        for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
+                chap = &ctrl->dhchap_ctxs[i];
+                chap->qid = i;
+                chap->ctrl = ctrl;
+                INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
+        }
+
+        return 0;
+err_free_dhchap_ctrl_secret:
+        nvme_auth_free_key(ctrl->ctrl_key);
+        ctrl->ctrl_key = NULL;
+err_free_dhchap_secret:
+        nvme_auth_free_key(ctrl->host_key);
+        ctrl->host_key = NULL;
         return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
 
 void nvme_auth_stop(struct nvme_ctrl *ctrl)
 {
-        struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+        struct nvme_dhchap_queue_context *chap;
+        int i;
 
         cancel_work_sync(&ctrl->dhchap_auth_work);
-        mutex_lock(&ctrl->dhchap_auth_mutex);
-        list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+        for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
+                chap = &ctrl->dhchap_ctxs[i];
                 cancel_work_sync(&chap->auth_work);
-        mutex_unlock(&ctrl->dhchap_auth_mutex);
+        }
 }
 EXPORT_SYMBOL_GPL(nvme_auth_stop);
 
 void nvme_auth_free(struct nvme_ctrl *ctrl)
 {
-        struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+        struct nvme_dhchap_queue_context *chap;
+        int i;
 
-        mutex_lock(&ctrl->dhchap_auth_mutex);
-        list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
-                list_del_init(&chap->entry);
-                flush_work(&chap->auth_work);
-                nvme_auth_free_dhchap(chap);
+        if (ctrl->dhchap_ctxs) {
+                for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
+                        chap = &ctrl->dhchap_ctxs[i];
+                        flush_work(&chap->auth_work);
+                        nvme_auth_free_dhchap(chap);
+                }
+                kfree(ctrl->dhchap_ctxs);
         }
-        mutex_unlock(&ctrl->dhchap_auth_mutex);
         if (ctrl->host_key) {
                 nvme_auth_free_key(ctrl->host_key);
                 ctrl->host_key = NULL;
@@ -3785,7 +3785,9 @@ static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
                 kfree(opts->dhchap_secret);
                 opts->dhchap_secret = dhchap_secret;
                 host_key = ctrl->host_key;
+                mutex_lock(&ctrl->dhchap_auth_mutex);
                 ctrl->host_key = key;
+                mutex_unlock(&ctrl->dhchap_auth_mutex);
                 nvme_auth_free_key(host_key);
         }
         /* Start re-authentication */
@@ -3837,7 +3839,9 @@ static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
                 kfree(opts->dhchap_ctrl_secret);
                 opts->dhchap_ctrl_secret = dhchap_secret;
                 ctrl_key = ctrl->ctrl_key;
+                mutex_lock(&ctrl->dhchap_auth_mutex);
                 ctrl->ctrl_key = key;
+                mutex_unlock(&ctrl->dhchap_auth_mutex);
                 nvme_auth_free_key(ctrl_key);
         }
         /* Start re-authentication */
@@ -337,8 +337,8 @@ struct nvme_ctrl {
 #ifdef CONFIG_NVME_AUTH
         struct work_struct dhchap_auth_work;
-        struct list_head dhchap_auth_list;
         struct mutex dhchap_auth_mutex;
+        struct nvme_dhchap_queue_context *dhchap_ctxs;
         struct nvme_dhchap_key *host_key;
         struct nvme_dhchap_key *ctrl_key;
         u16 transaction;