Commit f4345f05 authored by Linus Torvalds

Merge tag 'block-6.9-20240510' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - nvme target fixes (Sagi, Dan, Maurizio)
     - new vendor quirk for broken MSI (Sean)

 - Virtual boundary fix for a regression in this merge window (Ming)

* tag 'block-6.9-20240510' of git://git.kernel.dk/linux:
  nvmet-rdma: fix possible bad dereference when freeing rsps
  nvmet: prevent sprintf() overflow in nvmet_subsys_nsid_exists()
  nvmet: make nvmet_wq unbound
  nvmet-auth: return the error code to the nvmet_auth_ctrl_hash() callers
  nvme-pci: Add quirk for broken MSIs
  block: set default max segment size in case of virt_boundary
parents ed44935c a7721784
@@ -188,7 +188,10 @@ static int blk_validate_limits(struct queue_limits *lim)
          * bvec and lower layer bio splitting is supposed to handle the two
          * correctly.
          */
-        if (!lim->virt_boundary_mask) {
+        if (lim->virt_boundary_mask) {
+                if (!lim->max_segment_size)
+                        lim->max_segment_size = UINT_MAX;
+        } else {
                 /*
                  * The maximum segment size has an odd historic 64k default that
                  * drivers probably should override. Just like the I/O size we
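With the old check, a driver that sets a virt_boundary_mask but leaves max_segment_size at zero skipped the defaulting entirely and the field stayed 0; the new branch defaults it to UINT_MAX instead. A minimal userspace sketch of that defaulting rule, using an invented stand-in for struct queue_limits (the struct, helper, and 64k constant here are illustrative, not the kernel's):

#include <limits.h>
#include <stdio.h>

/* Invented stand-in for the two queue_limits fields involved. */
struct fake_limits {
        unsigned long virt_boundary_mask;
        unsigned int max_segment_size;
};

/* Mirrors the new defaulting rule: with a virt boundary and no
 * driver-supplied segment size, fall back to UINT_MAX; without a
 * virt boundary, keep the historic 64k default. */
static void apply_segment_default(struct fake_limits *lim)
{
        if (lim->virt_boundary_mask) {
                if (!lim->max_segment_size)
                        lim->max_segment_size = UINT_MAX;
        } else if (!lim->max_segment_size) {
                lim->max_segment_size = 65536;
        }
}

int main(void)
{
        struct fake_limits lim = { .virt_boundary_mask = 4095 };

        apply_segment_default(&lim);
        printf("max_segment_size = %u\n", lim.max_segment_size);
        return 0;
}
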
@@ -162,6 +162,11 @@ enum nvme_quirks {
          * Disables simple suspend/resume path.
          */
         NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),
+
+        /*
+         * MSI (but not MSI-X) interrupts are broken and never fire.
+         */
+        NVME_QUIRK_BROKEN_MSI = (1 << 21),
 };
 
 /*
@@ -2224,6 +2224,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
                 .priv           = dev,
         };
         unsigned int irq_queues, poll_queues;
+        unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
 
         /*
          * Poll queues don't need interrupts, but we need at least one I/O queue
@@ -2247,8 +2248,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
         irq_queues = 1;
         if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
                 irq_queues += (nr_io_queues - poll_queues);
-        return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
-                              PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+        if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+                flags &= ~PCI_IRQ_MSI;
+        return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
+                                              &affd);
 }
 
 static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
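Both call sites apply the quirk the same way: start from the full set of interrupt types and clear the MSI bit before asking the PCI core for vectors, so an affected controller ends up on MSI-X or legacy INTx. A small userspace sketch of that flag masking, with made-up constants standing in for the PCI_IRQ_* macros and the quirk bit (the values are illustrative only):

#include <stdio.h>

/* Made-up stand-ins for PCI_IRQ_* and the quirk bit; the real
 * definitions live in <linux/pci.h> and the NVMe driver headers. */
#define FAKE_IRQ_INTX           (1U << 0)
#define FAKE_IRQ_MSI            (1U << 1)
#define FAKE_IRQ_MSIX           (1U << 2)
#define FAKE_IRQ_ALL_TYPES      (FAKE_IRQ_INTX | FAKE_IRQ_MSI | FAKE_IRQ_MSIX)
#define FAKE_QUIRK_BROKEN_MSI   (1UL << 21)

/* Same pattern as the patch: drop MSI from the allowed types
 * when the controller is known to have broken MSI. */
static unsigned int allowed_irq_types(unsigned long quirks)
{
        unsigned int flags = FAKE_IRQ_ALL_TYPES;

        if (quirks & FAKE_QUIRK_BROKEN_MSI)
                flags &= ~FAKE_IRQ_MSI;
        return flags;
}

int main(void)
{
        printf("no quirk:   0x%x\n", allowed_irq_types(0));
        printf("broken MSI: 0x%x\n", allowed_irq_types(FAKE_QUIRK_BROKEN_MSI));
        return 0;
}
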
@@ -2477,6 +2480,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 {
         int result = -ENOMEM;
         struct pci_dev *pdev = to_pci_dev(dev->dev);
+        unsigned int flags = PCI_IRQ_ALL_TYPES;
 
         if (pci_enable_device_mem(pdev))
                 return result;
@@ -2493,7 +2497,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
          * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
          * adjust this later.
          */
-        result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+        if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+                flags &= ~PCI_IRQ_MSI;
+        result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
         if (result < 0)
                 goto disable;
 
@@ -3390,6 +3396,8 @@ static const struct pci_device_id nvme_id_table[] = {
                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
                                 NVME_QUIRK_DISABLE_WRITE_ZEROES|
                                 NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+        { PCI_DEVICE(0x15b7, 0x5008),   /* Sandisk SN530 */
+                .driver_data = NVME_QUIRK_BROKEN_MSI },
         { PCI_DEVICE(0x1987, 0x5012),   /* Phison E12 */
                 .driver_data = NVME_QUIRK_BOGUS_NID, },
         { PCI_DEVICE(0x1987, 0x5016),   /* Phison E16 */
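The new table entry ties the quirk to one specific device: when probe matches vendor 0x15b7 / device 0x5008, the driver_data bits land in the controller's quirks and the MSI masking above kicks in. A toy sketch of that table-match-to-quirks flow (the struct and lookup are invented; the real driver uses struct pci_device_id and the PCI core's matching):

#include <stdio.h>

/* Invented miniature of a PCI ID table: match on vendor/device and
 * carry per-device quirk flags in driver_data. */
#define FAKE_QUIRK_BROKEN_MSI   (1UL << 21)

struct fake_id {
        unsigned short vendor;
        unsigned short device;
        unsigned long driver_data;
};

static const struct fake_id id_table[] = {
        { 0x15b7, 0x5008, FAKE_QUIRK_BROKEN_MSI },      /* Sandisk SN530 */
        { 0x1987, 0x5012, 0 },                          /* Phison E12 */
        { 0, 0, 0 },                                    /* terminator */
};

static unsigned long quirks_for(unsigned short vendor, unsigned short device)
{
        const struct fake_id *id;

        for (id = id_table; id->vendor; id++)
                if (id->vendor == vendor && id->device == device)
                        return id->driver_data;
        return 0;
}

int main(void)
{
        printf("SN530 quirks: 0x%lx\n", quirks_for(0x15b7, 0x5008));
        printf("E12 quirks:   0x%lx\n", quirks_for(0x1987, 0x5012));
        return 0;
}
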
@@ -480,7 +480,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
         nvme_auth_free_key(transformed_key);
 out_free_tfm:
         crypto_free_shash(shash_tfm);
-        return 0;
+        return ret;
 }
 
 int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
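The one-character change above fixes a common goto-cleanup slip: the error code computed earlier in the function is stored in ret, but the shared exit path returned a hard-coded 0, so callers never saw the failure. A tiny illustration of the corrected pattern (the names and the failing step are invented for the example):

#include <errno.h>
#include <stdio.h>

/* Invented helper standing in for a step that can fail. */
static int do_step(int should_fail)
{
        return should_fail ? -ENOMEM : 0;
}

static int hash_op(int should_fail)
{
        int ret;

        ret = do_step(should_fail);
        if (ret)
                goto out_cleanup;
        /* ... more work that also sets ret ... */
out_cleanup:
        /* teardown shared by success and failure paths goes here */
        return ret;     /* previously "return 0", which hid the error */
}

int main(void)
{
        printf("success path: %d\n", hash_op(0));
        printf("failure path: %d\n", hash_op(1));
        return 0;
}
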
@@ -757,10 +757,9 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
 bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
 {
         struct config_item *ns_item;
-        char name[4] = {};
+        char name[12];
 
-        if (sprintf(name, "%u", nsid) <= 0)
-                return false;
+        snprintf(name, sizeof(name), "%u", nsid);
         mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
         ns_item = config_group_find_item(&subsys->namespaces_group, name);
         mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
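The old char name[4] only has room for a three-digit ID plus the NUL terminator, while a u32 nsid can need up to ten digits, so sprintf() could write past the end of the buffer. Twelve bytes covers the worst case with room to spare, and snprintf() bounds the write regardless. A quick userspace check of those sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t nsid = UINT32_MAX;     /* worst case: 4294967295, ten digits */
        char name[12];
        int len;

        /* Bounded formatting, as in the fixed code. */
        len = snprintf(name, sizeof(name), "%u", nsid);
        printf("formatted \"%s\" (%d chars) into a %zu-byte buffer\n",
               name, len, sizeof(name));

        /* The old 4-byte buffer would have needed 11 bytes here,
         * including the terminator, and overflowed by 7. */
        return 0;
}
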
@@ -1686,7 +1686,8 @@ static int __init nvmet_init(void)
         if (!buffered_io_wq)
                 goto out_free_zbd_work_queue;
 
-        nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+        nvmet_wq = alloc_workqueue("nvmet-wq",
+                                   WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
         if (!nvmet_wq)
                 goto out_free_buffered_work_queue;
 
@@ -474,12 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
         return 0;
 
 out_free:
-        while (--i >= 0) {
-                struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
-                list_del(&rsp->free_list);
-                nvmet_rdma_free_rsp(ndev, rsp);
-        }
+        while (--i >= 0)
+                nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
         kfree(queue->rsps);
 out:
         return ret;
@@ -490,12 +486,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
         struct nvmet_rdma_device *ndev = queue->dev;
         int i, nr_rsps = queue->recv_queue_size * 2;
 
-        for (i = 0; i < nr_rsps; i++) {
-                struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
-                list_del(&rsp->free_list);
-                nvmet_rdma_free_rsp(ndev, rsp);
-        }
+        for (i = 0; i < nr_rsps; i++)
+                nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
         kfree(queue->rsps);
 }
 
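In both hunks the removed list_del() is the risky part: per the commit title, a response is not guaranteed to be sitting on the free list at the point it is freed, so unlinking it can dereference a bad pointer. The fix frees every entry straight from the rsps array and ignores list membership. A small userspace sketch of that array-walk unwind after a partial allocation failure (the types and helpers are invented for the example):

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for a per-response object that owns a buffer. */
struct fake_rsp {
        void *buf;
};

static int alloc_rsp(struct fake_rsp *rsp, int should_fail)
{
        if (should_fail)
                return -1;
        rsp->buf = malloc(64);
        return rsp->buf ? 0 : -1;
}

static void free_rsp(struct fake_rsp *rsp)
{
        free(rsp->buf);
}

int main(void)
{
        struct fake_rsp rsps[8];
        int i, nr = 8;

        for (i = 0; i < nr; i++) {
                /* Simulate a failure on the sixth allocation. */
                if (alloc_rsp(&rsps[i], i == 5))
                        goto out_free;
        }
        printf("all %d responses allocated\n", nr);
        return 0;

out_free:
        /* Unwind only what was actually allocated, indexing the
         * array directly; no list bookkeeping to trip over. */
        while (--i >= 0)
                free_rsp(&rsps[i]);
        printf("unwound after failure at i=5\n");
        return 1;
}
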