Commit 98141430 authored by Jens Axboe

Merge tag 'nvme-6.12-2024-09-06' of git://git.infradead.org/nvme into for-6.12/block

Pull NVMe updates from Keith:

"nvme updates for Linux 6.12

 - Asynchronous namespace scanning (Stuart)
 - TCP TLS updates (Hannes)
 - RDMA queue controller validation (Niklas)
 - Align field names to the spec (Anuj)
 - Metadata support validation (Puranjay)"

* tag 'nvme-6.12-2024-09-06' of git://git.infradead.org/nvme:
  nvme: fix metadata handling in nvme-passthrough
  nvme: rename apptag and appmask to lbat and lbatm
  nvme-rdma: send cntlid in the RDMA_CM_REQUEST Private Data
  nvme-target: do not check authentication status for admin commands twice
  nvmet-auth: allow to clear DH-HMAC-CHAP keys
  nvme-sysfs: add 'tls_keyring' attribute
  nvme-sysfs: add 'tls_configured_key' sysfs attribute
  nvme: split off TLS sysfs attributes into a separate group
  nvme: add a newline to the 'tls_key' sysfs attribute
  nvme-tcp: check for invalidated or revoked key
  nvme-tcp: sanitize TLS key handling
  nvme-keyring: restrict match length for version '1' identifiers
  nvme_core: scan namespaces asynchronously
parents 68d20eb6 7c2fd760
......@@ -20,6 +20,28 @@ key_serial_t nvme_keyring_id(void)
}
EXPORT_SYMBOL_GPL(nvme_keyring_id);
static bool nvme_tls_psk_revoked(struct key *psk)
{
return test_bit(KEY_FLAG_REVOKED, &psk->flags) ||
test_bit(KEY_FLAG_INVALIDATED, &psk->flags);
}
struct key *nvme_tls_key_lookup(key_serial_t key_id)
{
struct key *key = key_lookup(key_id);
if (IS_ERR(key)) {
pr_err("key id %08x not found\n", key_id);
return key;
}
if (nvme_tls_psk_revoked(key)) {
pr_err("key id %08x revoked\n", key_id);
return ERR_PTR(-EKEYREVOKED);
}
return key;
}
EXPORT_SYMBOL_GPL(nvme_tls_key_lookup);
static void nvme_tls_psk_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
......@@ -36,14 +58,12 @@ static bool nvme_tls_psk_match(const struct key *key,
pr_debug("%s: no key description\n", __func__);
return false;
}
match_len = strlen(key->description);
pr_debug("%s: id %s len %zd\n", __func__, key->description, match_len);
if (!match_data->raw_data) {
pr_debug("%s: no match data\n", __func__);
return false;
}
match_id = match_data->raw_data;
match_len = strlen(match_id);
pr_debug("%s: match '%s' '%s' len %zd\n",
__func__, match_id, key->description, match_len);
return !memcmp(key->description, match_id, match_len);
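With match_len now taken from the lookup identity rather than from the key description, the comparison becomes a prefix match of the stored description. A minimal userspace sketch of that semantic, assuming (as the match-length restriction for version '1' identifiers suggests) that a stored description may carry trailing material such as a PSK digest beyond the lookup string; NQNs and digest here are placeholders:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *description = "NVMe1R01 nqn.host nqn.subsys AbCdEf12";
	const char *match_id = "NVMe1R01 nqn.host nqn.subsys";
	size_t match_len = strlen(match_id);	/* length of the lookup id, not of the key */

	/* mirrors: !memcmp(key->description, match_id, match_len) */
	printf("prefix match: %s\n",
	       !memcmp(description, match_id, match_len) ? "yes" : "no");
	return 0;
}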
......@@ -71,7 +91,7 @@ static struct key_type nvme_tls_psk_key_type = {
static struct key *nvme_tls_psk_lookup(struct key *keyring,
const char *hostnqn, const char *subnqn,
int hmac, bool generated)
u8 hmac, u8 psk_ver, bool generated)
{
char *identity;
size_t identity_len = (NVMF_NQN_SIZE) * 2 + 11;
......@@ -82,8 +102,8 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
if (!identity)
return ERR_PTR(-ENOMEM);
snprintf(identity, identity_len, "NVMe0%c%02d %s %s",
generated ? 'G' : 'R', hmac, hostnqn, subnqn);
snprintf(identity, identity_len, "NVMe%u%c%02u %s %s",
psk_ver, generated ? 'G' : 'R', hmac, hostnqn, subnqn);
if (!keyring)
keyring = nvme_keyring;
......@@ -107,21 +127,38 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
/*
* NVMe PSK priority list
*
* 'Retained' PSKs (ie 'generated == false')
* should be preferred to 'generated' PSKs,
* and SHA-384 should be preferred to SHA-256.
* 'Retained' PSKs (ie 'generated == false') should be preferred to 'generated'
* PSKs, PSKs with hash (psk_ver 1) should be preferred to PSKs without hash
* (psk_ver 0), and SHA-384 should be preferred to SHA-256.
*/
static struct nvme_tls_psk_priority_list {
bool generated;
u8 psk_ver;
enum nvme_tcp_tls_cipher cipher;
} nvme_tls_psk_prio[] = {
{ .generated = false,
.psk_ver = 1,
.cipher = NVME_TCP_TLS_CIPHER_SHA384, },
{ .generated = false,
.psk_ver = 1,
.cipher = NVME_TCP_TLS_CIPHER_SHA256, },
{ .generated = false,
.psk_ver = 0,
.cipher = NVME_TCP_TLS_CIPHER_SHA384, },
{ .generated = false,
.psk_ver = 0,
.cipher = NVME_TCP_TLS_CIPHER_SHA256, },
{ .generated = true,
.psk_ver = 1,
.cipher = NVME_TCP_TLS_CIPHER_SHA384, },
{ .generated = true,
.psk_ver = 1,
.cipher = NVME_TCP_TLS_CIPHER_SHA256, },
{ .generated = true,
.psk_ver = 0,
.cipher = NVME_TCP_TLS_CIPHER_SHA384, },
{ .generated = true,
.psk_ver = 0,
.cipher = NVME_TCP_TLS_CIPHER_SHA256, },
};
......@@ -137,10 +174,11 @@ key_serial_t nvme_tls_psk_default(struct key *keyring,
for (prio = 0; prio < ARRAY_SIZE(nvme_tls_psk_prio); prio++) {
bool generated = nvme_tls_psk_prio[prio].generated;
u8 ver = nvme_tls_psk_prio[prio].psk_ver;
enum nvme_tcp_tls_cipher cipher = nvme_tls_psk_prio[prio].cipher;
tls_key = nvme_tls_psk_lookup(keyring, hostnqn, subnqn,
cipher, generated);
cipher, ver, generated);
if (!IS_ERR(tls_key)) {
tls_key_id = tls_key->serial;
key_put(tls_key);
......
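Taken together, the identity format in nvme_tls_psk_lookup() ("NVMe%u%c%02u %s %s") and the extended priority table yield eight candidate identities per host/subsystem pair. A standalone sketch of the probe order, assuming the usual cipher codes (01 = SHA-256, 02 = SHA-384) and placeholder NQNs:

#include <stdio.h>

/* Candidate identities in priority order, mirroring nvme_tls_psk_prio[]:
 * retained before generated, PSK version 1 before version 0,
 * SHA-384 (02) before SHA-256 (01).
 */
int main(void)
{
	static const struct {
		unsigned int ver;
		char src;		/* 'R'etained or 'G'enerated */
		unsigned int hmac;
	} prio[] = {
		{ 1, 'R', 2 }, { 1, 'R', 1 }, { 0, 'R', 2 }, { 0, 'R', 1 },
		{ 1, 'G', 2 }, { 1, 'G', 1 }, { 0, 'G', 2 }, { 0, 'G', 1 },
	};
	const char *hostnqn = "nqn.2014-08.org.example:host";
	const char *subnqn = "nqn.2014-08.org.example:subsys";

	for (size_t i = 0; i < sizeof(prio) / sizeof(prio[0]); i++)
		printf("NVMe%u%c%02u %s %s\n",
		       prio[i].ver, prio[i].src, prio[i].hmac, hostnqn, subnqn);
	return 0;
}

Per the loop above, nvme_tls_psk_default() returns the serial of the first candidate that resolves to a live key.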
......@@ -109,6 +109,7 @@ config NVME_HOST_AUTH
bool "NVMe over Fabrics In-Band Authentication in host side"
depends on NVME_CORE
select NVME_AUTH
select NVME_KEYRING if NVME_TCP_TLS
help
This provides support for NVMe over Fabrics In-Band Authentication in
host side.
......
......@@ -4,6 +4,7 @@
* Copyright (c) 2011-2014, Intel Corporation.
*/
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
......@@ -986,8 +987,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
cmnd->rw.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
cmnd->rw.reftag = 0;
cmnd->rw.apptag = 0;
cmnd->rw.appmask = 0;
cmnd->rw.lbat = 0;
cmnd->rw.lbatm = 0;
if (ns->head->ms) {
/*
......@@ -4040,6 +4041,35 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
}
}
/**
* struct async_scan_info - keeps track of controller & NSIDs to scan
* @ctrl: Controller on which namespaces are being scanned
* @next_nsid: Index of next NSID to scan in ns_list
* @ns_list: Pointer to list of NSIDs to scan
*
* Note: There is a single async_scan_info structure shared by all instances
* of nvme_scan_ns_async() scanning a given controller, so the atomic
* operations on next_nsid are critical to ensure each instance scans a unique
* NSID.
*/
struct async_scan_info {
struct nvme_ctrl *ctrl;
atomic_t next_nsid;
__le32 *ns_list;
};
static void nvme_scan_ns_async(void *data, async_cookie_t cookie)
{
struct async_scan_info *scan_info = data;
int idx;
u32 nsid;
idx = (u32)atomic_fetch_inc(&scan_info->next_nsid);
nsid = le32_to_cpu(scan_info->ns_list[idx]);
nvme_scan_ns(scan_info->ctrl, nsid);
}
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid)
{
......@@ -4066,11 +4096,15 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
__le32 *ns_list;
u32 prev = 0;
int ret = 0, i;
ASYNC_DOMAIN(domain);
struct async_scan_info scan_info;
ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!ns_list)
return -ENOMEM;
scan_info.ctrl = ctrl;
scan_info.ns_list = ns_list;
for (;;) {
struct nvme_command cmd = {
.identify.opcode = nvme_admin_identify,
......@@ -4086,19 +4120,23 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
goto free;
}
atomic_set(&scan_info.next_nsid, 0);
for (i = 0; i < nr_entries; i++) {
u32 nsid = le32_to_cpu(ns_list[i]);
if (!nsid) /* end of the list? */
goto out;
nvme_scan_ns(ctrl, nsid);
async_schedule_domain(nvme_scan_ns_async, &scan_info,
&domain);
while (++prev < nsid)
nvme_ns_remove_by_nsid(ctrl, prev);
}
async_synchronize_full_domain(&domain);
}
out:
nvme_remove_invalid_namespaces(ctrl, prev);
free:
async_synchronize_full_domain(&domain);
kfree(ns_list);
return ret;
}
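The scheme: the list-scan loop schedules one async instance per NSID entry, and each instance claims a unique index via atomic_fetch_inc() before resolving it to an NSID, as the kernel-doc for struct async_scan_info notes. A userspace analogue of that claim pattern, using C11 threads in place of the kernel's async domain (all names illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

#define NR_NSIDS 8

struct scan_info {
	atomic_int next_nsid;			/* index of the next slot to claim */
	unsigned int ns_list[NR_NSIDS];
};

static int scan_worker(void *data)
{
	struct scan_info *si = data;
	/* fetch-and-increment hands each worker a unique index */
	int idx = atomic_fetch_add(&si->next_nsid, 1);

	printf("scanning nsid %u\n", si->ns_list[idx]);
	return 0;
}

int main(void)
{
	struct scan_info si = { .ns_list = { 1, 2, 3, 5, 8, 13, 21, 34 } };
	thrd_t workers[NR_NSIDS];

	atomic_init(&si.next_nsid, 0);
	for (int i = 0; i < NR_NSIDS; i++)
		thrd_create(&workers[i], scan_worker, &si);
	for (int i = 0; i < NR_NSIDS; i++)
		thrd_join(workers[i], NULL);	/* cf. async_synchronize_full_domain() */
	return 0;
}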
......@@ -4677,7 +4715,6 @@ static void nvme_free_ctrl(struct device *dev)
if (!subsys || ctrl->instance != subsys->instance)
ida_free(&nvme_instance_ida, ctrl->instance);
key_put(ctrl->tls_key);
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
cleanup_srcu_struct(&ctrl->srcu);
......
......@@ -665,7 +665,7 @@ static struct key *nvmf_parse_key(int key_id)
return ERR_PTR(-EINVAL);
}
key = key_lookup(key_id);
key = nvme_tls_key_lookup(key_id);
if (IS_ERR(key))
pr_err("key id %08x not found\n", key_id);
else
......
......@@ -4,6 +4,7 @@
* Copyright (c) 2017-2021 Christoph Hellwig.
*/
#include <linux/bio-integrity.h>
#include <linux/blk-integrity.h>
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring/cmd.h>
......@@ -119,9 +120,14 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
bool has_metadata = meta_buffer && meta_len;
struct bio *bio = NULL;
int ret;
if (has_metadata && !supports_metadata)
return -EINVAL;
if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
struct iov_iter iter;
......@@ -143,15 +149,15 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
goto out;
bio = req->bio;
if (bdev) {
if (bdev)
bio_set_dev(bio, bdev);
if (meta_buffer && meta_len) {
ret = bio_integrity_map_user(bio, meta_buffer, meta_len,
meta_seed);
if (ret)
goto out_unmap;
req->cmd_flags |= REQ_INTEGRITY;
}
if (has_metadata) {
ret = bio_integrity_map_user(bio, meta_buffer, meta_len,
meta_seed);
if (ret)
goto out_unmap;
req->cmd_flags |= REQ_INTEGRITY;
}
return ret;
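The net effect of the new has_metadata/supports_metadata flags is an early rejection of passthrough requests that attach a metadata buffer to a device without an integrity profile. A minimal restatement of the predicate (helper name hypothetical):

#include <stdbool.h>
#include <stddef.h>

/* Mirrors the guard at the top of nvme_map_user_request(): metadata may
 * only be attached when the block device advertises an integrity profile.
 */
static bool nvme_user_metadata_ok(bool supports_metadata,
				  const void *meta_buffer, size_t meta_len)
{
	bool has_metadata = meta_buffer && meta_len;

	return !has_metadata || supports_metadata;	/* false -> -EINVAL */
}

int main(void)
{
	char meta[16];

	/* metadata without an integrity-capable device must be rejected */
	return nvme_user_metadata_ok(false, meta, sizeof(meta)) ? 1 : 0;
}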
......@@ -260,8 +266,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.control = cpu_to_le16(io.control);
c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
c.rw.reftag = cpu_to_le32(io.reftag);
c.rw.apptag = cpu_to_le16(io.apptag);
c.rw.appmask = cpu_to_le16(io.appmask);
c.rw.lbat = cpu_to_le16(io.apptag);
c.rw.lbatm = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
meta_len, lower_32_bits(io.slba), NULL, 0, 0);
......
......@@ -373,7 +373,7 @@ struct nvme_ctrl {
struct nvme_dhchap_key *ctrl_key;
u16 transaction;
#endif
struct key *tls_key;
key_serial_t tls_pskid;
/* Power saving configuration */
u64 ps_max_latency_us;
......
......@@ -1363,8 +1363,8 @@ static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
if (control & NVME_RW_PRINFO_PRCHK_REF)
domain->sig.dif.ref_remap = true;
domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat);
domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm);
domain->sig.dif.app_escape = true;
if (pi_type == NVME_NS_DPS_PI_TYPE3)
domain->sig.dif.ref_escape = true;
......@@ -1876,6 +1876,8 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
*/
priv.hrqsize = cpu_to_le16(queue->queue_size);
priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
/* cntlid should only be set when creating an I/O queue */
priv.cntlid = cpu_to_le16(ctrl->ctrl.cntlid);
}
ret = rdma_connect_locked(queue->cm_id, &param);
......
......@@ -664,19 +664,6 @@ static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif
#ifdef CONFIG_NVME_TCP_TLS
static ssize_t tls_key_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
if (!ctrl->tls_key)
return 0;
return sysfs_emit(buf, "%08x", key_serial(ctrl->tls_key));
}
static DEVICE_ATTR_RO(tls_key);
#endif
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
......@@ -703,9 +690,6 @@ static struct attribute *nvme_dev_attrs[] = {
#ifdef CONFIG_NVME_HOST_AUTH
&dev_attr_dhchap_secret.attr,
&dev_attr_dhchap_ctrl_secret.attr,
#endif
#ifdef CONFIG_NVME_TCP_TLS
&dev_attr_tls_key.attr,
#endif
&dev_attr_adm_passthru_err_log_enabled.attr,
NULL
......@@ -737,11 +721,6 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
return 0;
#endif
#ifdef CONFIG_NVME_TCP_TLS
if (a == &dev_attr_tls_key.attr &&
(!ctrl->opts || strcmp(ctrl->opts->transport, "tcp")))
return 0;
#endif
return a->mode;
}
......@@ -752,8 +731,77 @@ const struct attribute_group nvme_dev_attrs_group = {
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
#ifdef CONFIG_NVME_TCP_TLS
static ssize_t tls_key_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
if (!ctrl->tls_pskid)
return 0;
return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid);
}
static DEVICE_ATTR_RO(tls_key);
static ssize_t tls_configured_key_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
struct key *key = ctrl->opts->tls_key;
return sysfs_emit(buf, "%08x\n", key_serial(key));
}
static DEVICE_ATTR_RO(tls_configured_key);
static ssize_t tls_keyring_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
struct key *keyring = ctrl->opts->keyring;
return sysfs_emit(buf, "%s\n", keyring->description);
}
static DEVICE_ATTR_RO(tls_keyring);
static struct attribute *nvme_tls_attrs[] = {
&dev_attr_tls_key.attr,
&dev_attr_tls_configured_key.attr,
&dev_attr_tls_keyring.attr,
NULL,
};
static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))
return 0;
if (a == &dev_attr_tls_key.attr &&
!ctrl->opts->tls)
return 0;
if (a == &dev_attr_tls_configured_key.attr &&
!ctrl->opts->tls_key)
return 0;
if (a == &dev_attr_tls_keyring.attr &&
!ctrl->opts->keyring)
return 0;
return a->mode;
}
const struct attribute_group nvme_tls_attrs_group = {
.attrs = nvme_tls_attrs,
.is_visible = nvme_tls_attrs_are_visible,
};
#endif
const struct attribute_group *nvme_dev_attr_groups[] = {
&nvme_dev_attrs_group,
#ifdef CONFIG_NVME_TCP_TLS
&nvme_tls_attrs_group,
#endif
NULL,
};
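Assuming a controller registered as nvme0, the new attributes read like any other sysfs file; a small sketch (paths hypothetical, and the is_visible logic above hides attributes whose options are not configured):

#include <stdio.h>

int main(void)
{
	const char *attrs[] = {
		"/sys/class/nvme/nvme0/tls_key",
		"/sys/class/nvme/nvme0/tls_configured_key",
		"/sys/class/nvme/nvme0/tls_keyring",
	};
	char buf[64];

	for (int i = 0; i < 3; i++) {
		FILE *f = fopen(attrs[i], "r");

		if (!f)
			continue;	/* attribute hidden or not configured */
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", attrs[i], buf);
		fclose(f);
	}
	return 0;
}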
......
......@@ -165,6 +165,7 @@ struct nvme_tcp_queue {
bool hdr_digest;
bool data_digest;
bool tls_enabled;
struct ahash_request *rcv_hash;
struct ahash_request *snd_hash;
__le32 exp_ddgst;
......@@ -213,7 +214,21 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl)
/*
* Check if the queue is TLS encrypted
*/
static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
{
if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
return 0;
return queue->tls_enabled;
}
/*
* Check if TLS is configured for the controller.
*/
static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
{
if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
return 0;
......@@ -368,7 +383,7 @@ static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
return !nvme_tcp_tls(&queue->ctrl->ctrl) &&
return !nvme_tcp_queue_tls(queue) &&
nvme_tcp_queue_has_pending(queue);
}
......@@ -1427,7 +1442,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
memset(&msg, 0, sizeof(msg));
iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp);
if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
if (nvme_tcp_queue_tls(queue)) {
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
......@@ -1439,7 +1454,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
ret = -ENOTCONN;
if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
if (nvme_tcp_queue_tls(queue)) {
ctype = tls_get_record_type(queue->sock->sk,
(struct cmsghdr *)cbuf);
if (ctype != TLS_RECORD_TYPE_DATA) {
......@@ -1581,13 +1596,16 @@ static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
goto out_complete;
}
tls_key = key_lookup(pskid);
tls_key = nvme_tls_key_lookup(pskid);
if (IS_ERR(tls_key)) {
dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n",
qid, pskid);
queue->tls_err = -ENOKEY;
} else {
ctrl->ctrl.tls_key = tls_key;
queue->tls_enabled = true;
if (qid == 0)
ctrl->ctrl.tls_pskid = key_serial(tls_key);
key_put(tls_key);
queue->tls_err = 0;
}
......@@ -1768,7 +1786,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
}
/* If PSKs are configured try to start TLS */
if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) {
if (nvme_tcp_tls_configured(nctrl) && pskid) {
ret = nvme_tcp_start_tls(nctrl, queue, pskid);
if (ret)
goto err_init_connect;
......@@ -1829,6 +1847,8 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
/* Stopping the queue will disable TLS */
queue->tls_enabled = false;
mutex_unlock(&queue->queue_lock);
}
......@@ -1925,16 +1945,17 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
int ret;
key_serial_t pskid = 0;
if (nvme_tcp_tls(ctrl)) {
if (nvme_tcp_tls_configured(ctrl)) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
else
else {
pskid = nvme_tls_psk_default(ctrl->opts->keyring,
ctrl->opts->host->nqn,
ctrl->opts->subsysnqn);
if (!pskid) {
dev_err(ctrl->device, "no valid PSK found\n");
return -ENOKEY;
if (!pskid) {
dev_err(ctrl->device, "no valid PSK found\n");
return -ENOKEY;
}
}
}
......@@ -1957,13 +1978,14 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) {
if (nvme_tcp_tls_configured(ctrl) && !ctrl->tls_pskid) {
dev_err(ctrl->device, "no PSK negotiated\n");
return -ENOKEY;
}
for (i = 1; i < ctrl->queue_count; i++) {
ret = nvme_tcp_alloc_queue(ctrl, i,
key_serial(ctrl->tls_key));
ctrl->tls_pskid);
if (ret)
goto out_free_queues;
}
......@@ -2144,6 +2166,11 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
if (remove)
nvme_unquiesce_admin_queue(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, remove);
if (ctrl->tls_pskid) {
dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n",
ctrl->tls_pskid);
ctrl->tls_pskid = 0;
}
}
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
......
......@@ -1005,8 +1005,6 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_admin_cmd(req);
if (unlikely(!nvmet_check_auth_status(req)))
return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
......
......@@ -25,6 +25,18 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
unsigned char key_hash;
char *dhchap_secret;
if (!strlen(secret)) {
if (set_ctrl) {
kfree(host->dhchap_ctrl_secret);
host->dhchap_ctrl_secret = NULL;
host->dhchap_ctrl_key_hash = 0;
} else {
kfree(host->dhchap_secret);
host->dhchap_secret = NULL;
host->dhchap_key_hash = 0;
}
return 0;
}
if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1)
return -EINVAL;
if (key_hash > 3) {
......
......@@ -578,8 +578,8 @@ static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
if (control & NVME_RW_PRINFO_PRCHK_REF)
domain->sig.dif.ref_remap = true;
domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat);
domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm);
domain->sig.dif.app_escape = true;
if (pi_type == NVME_NS_DPS_PI_TYPE3)
domain->sig.dif.ref_escape = true;
......
......@@ -12,7 +12,7 @@ key_serial_t nvme_tls_psk_default(struct key *keyring,
const char *hostnqn, const char *subnqn);
key_serial_t nvme_keyring_id(void);
struct key *nvme_tls_key_lookup(key_serial_t key_id);
#else
static inline key_serial_t nvme_tls_psk_default(struct key *keyring,
......@@ -24,5 +24,9 @@ static inline key_serial_t nvme_keyring_id(void)
{
return 0;
}
static inline struct key *nvme_tls_key_lookup(key_serial_t key_id)
{
return ERR_PTR(-ENOTSUPP);
}
#endif /* !CONFIG_NVME_KEYRING */
#endif /* _NVME_KEYRING_H */
......@@ -25,6 +25,7 @@ enum nvme_rdma_cm_status {
NVME_RDMA_CM_NO_RSC = 0x06,
NVME_RDMA_CM_INVALID_IRD = 0x07,
NVME_RDMA_CM_INVALID_ORD = 0x08,
NVME_RDMA_CM_INVALID_CNTLID = 0x09,
};
static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
......@@ -46,6 +47,8 @@ static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
return "invalid IRD";
case NVME_RDMA_CM_INVALID_ORD:
return "Invalid ORD";
case NVME_RDMA_CM_INVALID_CNTLID:
return "invalid controller ID";
default:
return "unrecognized reason";
}
......@@ -64,7 +67,8 @@ struct nvme_rdma_cm_req {
__le16 qid;
__le16 hrqsize;
__le16 hsqsize;
u8 rsvd[24];
__le16 cntlid;
u8 rsvd[22];
};
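As a quick sanity check of the layout change, the fields shown in this hunk suffice to see that carving cntlid out of the reserved area keeps the private-data tail the same size (fixed-width stand-ins for __le16/u8; fields above qid omitted):

#include <assert.h>
#include <stdint.h>

struct cm_req_tail_old {	/* qid onward, before this change */
	uint16_t qid, hrqsize, hsqsize;
	uint8_t rsvd[24];
};

struct cm_req_tail_new {	/* cntlid carved out of the reserved bytes */
	uint16_t qid, hrqsize, hsqsize, cntlid;
	uint8_t rsvd[22];
};

static_assert(sizeof(struct cm_req_tail_old) == sizeof(struct cm_req_tail_new),
	      "RDMA_CM_REQUEST private data must keep its size");

int main(void)
{
	return 0;
}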
/**
......
......@@ -987,8 +987,8 @@ struct nvme_rw_command {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
__le16 apptag;
__le16 appmask;
__le16 lbat;
__le16 lbatm;
};
enum {
......@@ -1057,8 +1057,8 @@ struct nvme_write_zeroes_cmd {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
__le16 apptag;
__le16 appmask;
__le16 lbat;
__le16 lbatm;
};
enum nvme_zone_mgmt_action {
......