Commit 104d9c7f authored by Christoph Hellwig, committed by Martin K. Petersen

scsi: csiostor: switch to pci_alloc_irq_vectors

And get automatic MSI-X affinity for free.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Varun Prakash <varun@chelsio.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 75106523
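The pattern the driver switches to looks roughly like the sketch below. This is illustrative code, not part of the patch: my_isr(), my_setup_irqs(), the "my-driver" name and the vector counts are placeholders. The points it shows are that pci_alloc_irq_vectors_affinity() replaces the open-coded struct msix_entry array plus pci_enable_msix_range(), that .pre_vectors = 2 keeps the two non-queue vectors (non-data and firmware-event in this driver) out of the automatic affinity spread while the remaining vectors are distributed across CPUs, and that pci_irq_vector() maps a vector index to a Linux IRQ number on demand, so the driver no longer has to cache vector numbers in csio_msix_entries.

/*
 * Minimal sketch of the pci_alloc_irq_vectors_affinity() usage pattern
 * adopted by this patch.  Not csiostor code; names are illustrative.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int my_setup_irqs(struct pci_dev *pdev, void *drvdata,
                         unsigned int min_vecs, unsigned int max_vecs)
{
        /* Keep the first two vectors out of the automatic affinity spread. */
        struct irq_affinity desc = { .pre_vectors = 2 };
        int nvec, i, rv;

        /* Allocate between min_vecs and max_vecs MSI-X vectors. */
        nvec = pci_alloc_irq_vectors_affinity(pdev, min_vecs, max_vecs,
                                              PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                              &desc);
        if (nvec < 0)
                return nvec;

        /* Vectors are looked up by index; nothing needs to be stored. */
        for (i = 0; i < nvec; i++) {
                rv = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
                                 "my-driver", drvdata);
                if (rv)
                        goto out_free;
        }
        return 0;

out_free:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), drvdata);
        pci_free_irq_vectors(pdev);
        return rv;
}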
@@ -95,7 +95,6 @@ enum {
 };
 
 struct csio_msix_entries {
-        unsigned short  vector;         /* Assigned MSI-X vector */
         void            *dev_id;        /* Priv object associated w/ this msix*/
         char            desc[24];       /* Description of this vector */
 };
@@ -383,17 +383,15 @@ csio_request_irqs(struct csio_hw *hw)
         int rv, i, j, k = 0;
         struct csio_msix_entries *entryp = &hw->msix_entries[0];
         struct csio_scsi_cpu_info *info;
+        struct pci_dev *pdev = hw->pdev;
 
         if (hw->intr_mode != CSIO_IM_MSIX) {
-                rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
-                                (hw->intr_mode == CSIO_IM_MSI) ?
-                                                        0 : IRQF_SHARED,
+                rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr,
+                                hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
                                 KBUILD_MODNAME, hw);
                 if (rv) {
-                        if (hw->intr_mode == CSIO_IM_MSI)
-                                pci_disable_msi(hw->pdev);
                         csio_err(hw, "Failed to allocate interrupt line.\n");
-                        return -EINVAL;
+                        goto out_free_irqs;
                 }
 
                 goto out;
@@ -402,22 +400,22 @@ csio_request_irqs(struct csio_hw *hw)
         /* Add the MSIX vector descriptions */
         csio_add_msix_desc(hw);
 
-        rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
+        rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0,
                          entryp[k].desc, hw);
         if (rv) {
                 csio_err(hw, "IRQ request failed for vec %d err:%d\n",
-                         entryp[k].vector, rv);
-                goto err;
+                         pci_irq_vector(pdev, k), rv);
+                goto out_free_irqs;
         }
 
-        entryp[k++].dev_id = (void *)hw;
+        entryp[k++].dev_id = hw;
 
-        rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
+        rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0,
                          entryp[k].desc, hw);
         if (rv) {
                 csio_err(hw, "IRQ request failed for vec %d err:%d\n",
-                         entryp[k].vector, rv);
-                goto err;
+                         pci_irq_vector(pdev, k), rv);
+                goto out_free_irqs;
         }
 
         entryp[k++].dev_id = (void *)hw;
@@ -429,51 +427,31 @@ csio_request_irqs(struct csio_hw *hw)
                         struct csio_scsi_qset *sqset = &hw->sqset[i][j];
                         struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
 
-                        rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
+                        rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0,
                                          entryp[k].desc, q);
                         if (rv) {
                                 csio_err(hw,
                                        "IRQ request failed for vec %d err:%d\n",
-                                       entryp[k].vector, rv);
-                                goto err;
+                                       pci_irq_vector(pdev, k), rv);
+                                goto out_free_irqs;
                         }
 
-                        entryp[k].dev_id = (void *)q;
+                        entryp[k].dev_id = q;
 
                 } /* for all scsi cpus */
         } /* for all ports */
 
 out:
         hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
         return 0;
 
-err:
-        for (i = 0; i < k; i++) {
-                entryp = &hw->msix_entries[i];
-                free_irq(entryp->vector, entryp->dev_id);
-        }
-        pci_disable_msix(hw->pdev);
-
+out_free_irqs:
+        for (i = 0; i < k; i++)
+                free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
+        pci_free_irq_vectors(hw->pdev);
         return -EINVAL;
 }
 
-static void
-csio_disable_msix(struct csio_hw *hw, bool free)
-{
-        int i;
-        struct csio_msix_entries *entryp;
-        int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;
-
-        if (free) {
-                for (i = 0; i < cnt; i++) {
-                        entryp = &hw->msix_entries[i];
-                        free_irq(entryp->vector, entryp->dev_id);
-                }
-        }
-        pci_disable_msix(hw->pdev);
-}
-
 /* Reduce per-port max possible CPUs */
 static void
 csio_reduce_sqsets(struct csio_hw *hw, int cnt)
@@ -500,10 +478,9 @@ static int
 csio_enable_msix(struct csio_hw *hw)
 {
         int i, j, k, n, min, cnt;
-        struct csio_msix_entries *entryp;
-        struct msix_entry *entries;
         int extra = CSIO_EXTRA_VECS;
         struct csio_scsi_cpu_info *info;
+        struct irq_affinity desc = { .pre_vectors = 2 };
 
         min = hw->num_pports + extra;
         cnt = hw->num_sqsets + extra;
@@ -512,50 +489,35 @@ csio_enable_msix(struct csio_hw *hw)
         if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
                 cnt = min_t(uint8_t, hw->cfg_niq, cnt);
 
-        entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
-        if (!entries)
-                return -ENOMEM;
-
-        for (i = 0; i < cnt; i++)
-                entries[i].entry = (uint16_t)i;
-
         csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
 
-        cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
-        if (cnt < 0) {
-                kfree(entries);
+        cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
+                        PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
+        if (cnt < 0)
                 return cnt;
-        }
 
         if (cnt < (hw->num_sqsets + extra)) {
                 csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
                 csio_reduce_sqsets(hw, cnt - extra);
         }
 
-        /* Save off vectors */
-        for (i = 0; i < cnt; i++) {
-                entryp = &hw->msix_entries[i];
-                entryp->vector = entries[i].vector;
-        }
-
         /* Distribute vectors */
         k = 0;
-        csio_set_nondata_intr_idx(hw, entries[k].entry);
-        csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
-        csio_set_fwevt_intr_idx(hw, entries[k++].entry);
+        csio_set_nondata_intr_idx(hw, k);
+        csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
+        csio_set_fwevt_intr_idx(hw, k++);
 
         for (i = 0; i < hw->num_pports; i++) {
                 info = &hw->scsi_cpu_info[i];
 
                 for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
                         n = (j % info->max_cpus) + k;
-                        hw->sqset[i][j].intr_idx = entries[n].entry;
+                        hw->sqset[i][j].intr_idx = n;
                 }
 
                 k += info->max_cpus;
         }
 
-        kfree(entries);
         return 0;
 }
@@ -597,22 +559,26 @@ csio_intr_disable(struct csio_hw *hw, bool free)
 {
         csio_hw_intr_disable(hw);
 
-        switch (hw->intr_mode) {
-        case CSIO_IM_MSIX:
-                csio_disable_msix(hw, free);
-                break;
-        case CSIO_IM_MSI:
-                if (free)
-                        free_irq(hw->pdev->irq, hw);
-                pci_disable_msi(hw->pdev);
-                break;
-        case CSIO_IM_INTX:
-                if (free)
-                        free_irq(hw->pdev->irq, hw);
-                break;
-        default:
-                break;
+        if (free) {
+                int i;
+
+                switch (hw->intr_mode) {
+                case CSIO_IM_MSIX:
+                        for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) {
+                                free_irq(pci_irq_vector(hw->pdev, i),
+                                         hw->msix_entries[i].dev_id);
+                        }
+                        break;
+                case CSIO_IM_MSI:
+                case CSIO_IM_INTX:
+                        free_irq(pci_irq_vector(hw->pdev, 0), hw);
+                        break;
+                default:
+                        break;
+                }
         }
+
+        pci_free_irq_vectors(hw->pdev);
         hw->intr_mode = CSIO_IM_NONE;
         hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
 }
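A related point visible in the csio_intr_disable() hunk: pci_free_irq_vectors() tears down whichever interrupt mode was enabled, so the explicit pci_disable_msi()/pci_disable_msix() calls go away, and pci_irq_vector(pdev, 0) resolves the IRQ number in the MSI and INTx cases as well. Below is a rough, illustrative sketch of that single-vector fallback pattern; it is not csiostor code, and the flag combination and function name are assumptions made for the example.

/*
 * Illustrative sketch, not part of the patch: request one vector and let
 * the PCI core fall back from MSI-X to MSI to legacy INTx.  Index 0 is
 * valid in every mode, and pci_free_irq_vectors() later undoes whichever
 * mode was enabled.
 */
#include <linux/pci.h>

static int my_enable_single_irq(struct pci_dev *pdev)
{
        int nvec;

        nvec = pci_alloc_irq_vectors(pdev, 1, 1,
                                     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
        if (nvec < 0)
                return nvec;

        /* Return the Linux IRQ number backing vector 0. */
        return pci_irq_vector(pdev, 0);
}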