Commit 8f16bc97 authored by Sudarsana Kalluru, committed by David S. Miller

qed: Correct slowpath interrupt scheme

When using INTa, the ISR might be called before the device is configured
for INTa (e.g., because another device asserts the shared interrupt line),
in which case the ISR would read the SISR registers, which must not be
read until the HW is configured for INTa. This might break interrupts
later on. There is also an MSI-X issue stemming from the same ordering,
although it is mostly theoretical.

This patch changes the initialization order, calling request_irq() for the
slowpath interrupt only after the chip is configured for working
in the preferred interrupt mode.
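
The effect of the reordering, as a minimal C sketch; the types and helper
names below are illustrative stand-ins, not the actual qed structures or
calls:

	/* Stand-in types/helpers for illustration only. */
	struct hwfn {
		int configured;	/* HW programmed for the chosen int mode */
	};

	static void configure_int_mode(struct hwfn *hw)
	{
		hw->configured = 1;
	}

	static int request_sp_irq(struct hwfn *hw)
	{
		/* Once this returns, the ISR may fire at any time,
		 * e.g. another device asserting a shared INTa line.
		 */
		return 0;
	}

	/* Old order: the ISR could run while configured was still 0
	 * and read SISR registers prematurely.
	 */
	static int init_old(struct hwfn *hw)
	{
		int rc = request_sp_irq(hw);

		if (rc)
			return rc;
		configure_int_mode(hw);
		return 0;
	}

	/* New order: configure the chip first, then register the handler. */
	static int init_new(struct hwfn *hw)
	{
		configure_int_mode(hw);
		return request_sp_irq(hw);
	}

Concretely, the patch moves qed_slowpath_irq_req() out of
qed_slowpath_start() and calls it from qed_int_igu_enable(), which runs
only once the hardware has been brought up in the chosen mode.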
Signed-off-by: Sudarsana Kalluru <Sudarsana.Kalluru@qlogic.com>
Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c78df14e
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -299,6 +299,7 @@ struct qed_hwfn {
 	/* Flag indicating whether interrupts are enabled or not*/
 	bool			b_int_enabled;
+	bool			b_int_requested;
 
 	struct qed_mcp_info	*mcp_info;
@@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
 		   u32 input_len, u8 *input_buf,
 		   u32 max_size, u8 *unzip_buf);
 
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+
 #define QED_ETH_INTERFACE_VERSION	300
 
 #endif /* _QED_H */
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
 	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
 }
 
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt,
-			enum qed_int_mode int_mode)
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       enum qed_int_mode int_mode)
 {
-	int i;
-
-	p_hwfn->b_int_enabled = 1;
+	int rc, i;
 
 	/* Mask non-link attentions */
 	for (i = 0; i < 9; i++)
 		qed_wr(p_hwfn, p_ptt,
 		       MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
 
-	/* Enable interrupt Generation */
-	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
-
 	/* Configure AEU signal change to produce attentions for link */
 	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
 	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
@@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
 
 	/* Unmask AEU signals toward IGU */
 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+		rc = qed_slowpath_irq_req(p_hwfn);
+		if (rc != 0) {
+			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
+			return -EINVAL;
+		}
+		p_hwfn->b_int_requested = true;
+	}
+	/* Enable interrupt Generation */
+	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+	p_hwfn->b_int_enabled = 1;
+
+	return rc;
 }
 
 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
@@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
 
 	return info->igu_sb_cnt;
 }
+
+void qed_int_disable_post_isr_release(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i)
+		cdev->hwfns[i].b_int_requested = false;
+}
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
 		       int *p_iov_blks);
 
 /**
- * @file
+ * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ *        release. The API needs to be called after releasing all slowpath
+ *        IRQs of the device.
+ *
+ * @param cdev
  *
- * @brief Interrupt handler
  */
+void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 
 #define QED_CAU_DEF_RX_TIMER_RES 0
 #define QED_CAU_DEF_TX_TIMER_RES 0
@@ -366,9 +370,10 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
  * @param p_hwfn
  * @param p_ptt
  * @param int_mode
+ *
+ * @return int
  */
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt,
-			enum qed_int_mode int_mode);
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       enum qed_int_mode int_mode);
 
 /**
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
 	return rc;
 }
 
-static int qed_slowpath_irq_req(struct qed_dev *cdev)
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
 {
-	int i = 0, rc = 0;
+	struct qed_dev *cdev = hwfn->cdev;
+	int rc = 0;
+	u8 id;
 
 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
-		/* Request all the slowpath MSI-X vectors */
-		for (i = 0; i < cdev->num_hwfns; i++) {
-			snprintf(cdev->hwfns[i].name, NAME_SIZE,
-				 "sp-%d-%02x:%02x.%02x",
-				 i, cdev->pdev->bus->number,
-				 PCI_SLOT(cdev->pdev->devfn),
-				 cdev->hwfns[i].abs_pf_id);
-
-			rc = request_irq(cdev->int_params.msix_table[i].vector,
-					 qed_msix_sp_int, 0,
-					 cdev->hwfns[i].name,
-					 cdev->hwfns[i].sp_dpc);
-			if (rc)
-				break;
-
-			DP_VERBOSE(&cdev->hwfns[i],
-				   (NETIF_MSG_INTR | QED_MSG_SP),
-				   "Requested slowpath MSI-X\n");
-		}
-
-		if (i != cdev->num_hwfns) {
-			/* Free already request MSI-X vectors */
-			for (i--; i >= 0; i--) {
-				unsigned int vec =
-					cdev->int_params.msix_table[i].vector;
-
-				synchronize_irq(vec);
-				free_irq(cdev->int_params.msix_table[i].vector,
-					 cdev->hwfns[i].sp_dpc);
-			}
-		}
+		id = hwfn->my_id;
+		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
+			 id, cdev->pdev->bus->number,
+			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+		rc = request_irq(cdev->int_params.msix_table[id].vector,
+				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
+		if (!rc)
+			DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+				   "Requested slowpath MSI-X\n");
 	} else {
 		unsigned long flags = 0;
@@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
 
 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 		for_each_hwfn(cdev, i) {
+			if (!cdev->hwfns[i].b_int_requested)
+				break;
 			synchronize_irq(cdev->int_params.msix_table[i].vector);
 			free_irq(cdev->int_params.msix_table[i].vector,
 				 cdev->hwfns[i].sp_dpc);
 		}
 	} else {
-		free_irq(cdev->pdev->irq, cdev);
+		if (QED_LEADING_HWFN(cdev)->b_int_requested)
+			free_irq(cdev->pdev->irq, cdev);
 	}
+	qed_int_disable_post_isr_release(cdev);
 }
 
 static int qed_nic_stop(struct qed_dev *cdev)
@@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 	if (rc)
 		goto err1;
 
-	/* Request the slowpath IRQ */
-	rc = qed_slowpath_irq_req(cdev);
-	if (rc)
-		goto err2;
-
 	/* Allocate stream for unzipping */
 	rc = qed_alloc_stream_mem(cdev);
 	if (rc) {
 		DP_NOTICE(cdev, "Failed to allocate stream memory\n");
-		goto err3;
+		goto err2;
 	}
 
 	/* Start the slowpath */