Commit c3414550 authored by Jens Axboe's avatar Jens Axboe

Merge tag 'nvme-6.6-2023-10-18' of git://git.infradead.org/nvme into block-6.6

Pull NVMe fixes from Keith:

"nvme fixes for Linux 6.6

 - nvme-rdma queue fix (Maurizio)
 - nvmet-auth double free fix (Maurizio)
 - nvme-tcp use-after-free fix (Sagi)
 - nvme-auth data direction fix (Martin)
 - nvme passthrough metadata sanitization (Keith)
 - nvme bogus identifiers for multi-controller ssd (Keith)"

* tag 'nvme-6.6-2023-10-18' of git://git.infradead.org/nvme:
  nvme-pci: add BOGUS_NID for Intel 0a54 device
  nvmet-auth: complete a request only after freeing the dhchap pointers
  nvme: sanitize metadata bounce buffer for reads
  nvme-auth: use chap->s2 to indicate bidirectional authentication
  nvmet-tcp: Fix a possible UAF in queue initialization setup
  nvme-rdma: do not try to stop unallocated queues
parents 4eaf0932 5c3f4066
...@@ -341,7 +341,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl, ...@@ -341,7 +341,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
struct nvmf_auth_dhchap_success1_data *data = chap->buf; struct nvmf_auth_dhchap_success1_data *data = chap->buf;
size_t size = sizeof(*data); size_t size = sizeof(*data);
if (chap->ctrl_key) if (chap->s2)
size += chap->hash_len; size += chap->hash_len;
if (size > CHAP_BUF_SIZE) { if (size > CHAP_BUF_SIZE) {
...@@ -825,7 +825,7 @@ static void nvme_queue_auth_work(struct work_struct *work) ...@@ -825,7 +825,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
goto fail2; goto fail2;
} }
if (chap->ctrl_key) { if (chap->s2) {
/* DH-HMAC-CHAP Step 5: send success2 */ /* DH-HMAC-CHAP Step 5: send success2 */
dev_dbg(ctrl->device, "%s: qid %d send success2\n", dev_dbg(ctrl->device, "%s: qid %d send success2\n",
__func__, chap->qid); __func__, chap->qid);
......
...@@ -108,9 +108,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf, ...@@ -108,9 +108,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
if (!buf) if (!buf)
goto out; goto out;
ret = -EFAULT; if (req_op(req) == REQ_OP_DRV_OUT) {
if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len)) ret = -EFAULT;
goto out_free_meta; if (copy_from_user(buf, ubuf, len))
goto out_free_meta;
} else {
memset(buf, 0, len);
}
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
if (IS_ERR(bip)) { if (IS_ERR(bip)) {
......
...@@ -3329,7 +3329,8 @@ static const struct pci_device_id nvme_id_table[] = { ...@@ -3329,7 +3329,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE | .driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES | NVME_QUIRK_DEALLOCATE_ZEROES |
NVME_QUIRK_IGNORE_DEV_SUBNQN, }, NVME_QUIRK_IGNORE_DEV_SUBNQN |
NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE | .driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, }, NVME_QUIRK_DEALLOCATE_ZEROES, },
......
...@@ -638,6 +638,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) ...@@ -638,6 +638,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{ {
if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
mutex_lock(&queue->queue_lock); mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
__nvme_rdma_stop_queue(queue); __nvme_rdma_stop_queue(queue);
......
...@@ -333,19 +333,21 @@ void nvmet_execute_auth_send(struct nvmet_req *req) ...@@ -333,19 +333,21 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
__func__, ctrl->cntlid, req->sq->qid, __func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc); status, req->error_loc);
req->cqe->result.u64 = 0; req->cqe->result.u64 = 0;
nvmet_req_complete(req, status);
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 && if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120; unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
mod_delayed_work(system_wq, &req->sq->auth_expired_work, mod_delayed_work(system_wq, &req->sq->auth_expired_work,
auth_expire_secs * HZ); auth_expire_secs * HZ);
return; goto complete;
} }
/* Final states, clear up variables */ /* Final states, clear up variables */
nvmet_auth_sq_free(req->sq); nvmet_auth_sq_free(req->sq);
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
nvmet_ctrl_fatal_error(ctrl); nvmet_ctrl_fatal_error(ctrl);
complete:
nvmet_req_complete(req, status);
} }
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al) static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
...@@ -514,11 +516,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) ...@@ -514,11 +516,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
kfree(d); kfree(d);
done: done:
req->cqe->result.u64 = 0; req->cqe->result.u64 = 0;
nvmet_req_complete(req, status);
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq); nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
nvmet_auth_sq_free(req->sq); nvmet_auth_sq_free(req->sq);
nvmet_ctrl_fatal_error(ctrl); nvmet_ctrl_fatal_error(ctrl);
} }
nvmet_req_complete(req, status);
} }
...@@ -372,6 +372,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) ...@@ -372,6 +372,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{ {
queue->rcv_state = NVMET_TCP_RECV_ERR;
if (status == -EPIPE || status == -ECONNRESET) if (status == -EPIPE || status == -ECONNRESET)
kernel_sock_shutdown(queue->sock, SHUT_RDWR); kernel_sock_shutdown(queue->sock, SHUT_RDWR);
else else
...@@ -910,15 +911,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) ...@@ -910,15 +911,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
iov.iov_len = sizeof(*icresp); iov.iov_len = sizeof(*icresp);
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (ret < 0) if (ret < 0)
goto free_crypto; return ret; /* queue removal will cleanup */
queue->state = NVMET_TCP_Q_LIVE; queue->state = NVMET_TCP_Q_LIVE;
nvmet_prepare_receive_pdu(queue); nvmet_prepare_receive_pdu(queue);
return 0; return 0;
free_crypto:
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
return ret;
} }
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment