Commit 875b4e2a authored by Jon Derrick, committed by Bjorn Helgaas

PCI: vmd: Create IRQ allocation helper

Move the IRQ allocation and SRCU initialization code to a new helper.  No
functional changes.

Link: https://lore.kernel.org/r/20200728194945.14126-5-jonathan.derrick@intel.com
Signed-off-by: Jon Derrick <jonathan.derrick@intel.com>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
parent 1552b11b
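For context when reading the diff below, the per-vector state that the new helper sets up is tracked by two small structures in the VMD driver. The following is a paraphrased sketch of those structures (field names taken from drivers/pci/controller/vmd.c around this series; treat the details as an approximation for orientation, not as part of this patch):

/* One entry per child-device MSI/X vector hooked onto a VMD vector. */
struct vmd_irq {
        struct list_head        node;           /* link in vmd_irq_list::irq_list */
        struct vmd_irq_list     *irq;           /* parent VMD vector */
        bool                    enabled;        /* set once the child IRQ is enabled */
        unsigned int            virq;           /* child virq handled in vmd_irq() */
};

/* One entry per VMD MSI-X vector; vmd_alloc_irqs() inits the list and SRCU. */
struct vmd_irq_list {
        struct list_head        irq_list;       /* child IRQs demultiplexed by vmd_irq() */
        struct srcu_struct      srcu;           /* protects readers of irq_list */
        unsigned int            count;          /* number of children on this vector */
};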
@@ -528,6 +528,55 @@ static int vmd_get_bus_number_start(struct vmd_dev *vmd)
 	return 0;
 }
 
+static irqreturn_t vmd_irq(int irq, void *data)
+{
+	struct vmd_irq_list *irqs = data;
+	struct vmd_irq *vmdirq;
+	int idx;
+
+	idx = srcu_read_lock(&irqs->srcu);
+	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
+		generic_handle_irq(vmdirq->virq);
+	srcu_read_unlock(&irqs->srcu, idx);
+
+	return IRQ_HANDLED;
+}
+
+static int vmd_alloc_irqs(struct vmd_dev *vmd)
+{
+	struct pci_dev *dev = vmd->dev;
+	int i, err;
+
+	vmd->msix_count = pci_msix_vec_count(dev);
+	if (vmd->msix_count < 0)
+		return -ENODEV;
+
+	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
+						PCI_IRQ_MSIX);
+	if (vmd->msix_count < 0)
+		return vmd->msix_count;
+
+	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
+				 GFP_KERNEL);
+	if (!vmd->irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < vmd->msix_count; i++) {
+		err = init_srcu_struct(&vmd->irqs[i].srcu);
+		if (err)
+			return err;
+
+		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
+		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+				       vmd_irq, IRQF_NO_THREAD,
+				       "vmd", &vmd->irqs[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
@@ -663,24 +712,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	return 0;
 }
 
-static irqreturn_t vmd_irq(int irq, void *data)
-{
-	struct vmd_irq_list *irqs = data;
-	struct vmd_irq *vmdirq;
-	int idx;
-
-	idx = srcu_read_lock(&irqs->srcu);
-	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
-		generic_handle_irq(vmdirq->virq);
-	srcu_read_unlock(&irqs->srcu, idx);
-
-	return IRQ_HANDLED;
-}
-
 static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct vmd_dev *vmd;
-	int i, err;
+	int err;
 
 	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
 		return -ENOMEM;
@@ -703,32 +738,9 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
 		return -ENODEV;
 
-	vmd->msix_count = pci_msix_vec_count(dev);
-	if (vmd->msix_count < 0)
-		return -ENODEV;
-
-	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
-						PCI_IRQ_MSIX);
-	if (vmd->msix_count < 0)
-		return vmd->msix_count;
-
-	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
-				 GFP_KERNEL);
-	if (!vmd->irqs)
-		return -ENOMEM;
-
-	for (i = 0; i < vmd->msix_count; i++) {
-		err = init_srcu_struct(&vmd->irqs[i].srcu);
-		if (err)
-			return err;
-
-		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
-		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
-				       vmd_irq, IRQF_NO_THREAD,
-				       "vmd", &vmd->irqs[i]);
-		if (err)
-			return err;
-	}
+	err = vmd_alloc_irqs(vmd);
+	if (err)
+		return err;
 
 	spin_lock_init(&vmd->cfg_lock);
 	pci_set_drvdata(dev, vmd);
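Note that the helper only initializes the per-vector SRCU state; teardown is not devm-managed and stays in the driver's remove path. Roughly, the existing counterpart in vmd.c looks like the sketch below (paraphrased for context, not part of this diff):

/* Called from vmd_remove() to undo the init_srcu_struct() calls made in
 * vmd_alloc_irqs(), one per allocated MSI-X vector.
 */
static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
        int i;

        for (i = 0; i < vmd->msix_count; i++)
                cleanup_srcu_struct(&vmd->irqs[i].srcu);
}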