Commit dd353109 authored by Anjali Singhai Jain, committed by Jeff Kirsher

i40e: Add a SW workaround for lost interrupts

This patch adds a workaround for cases where an interrupt is lost
even though the descriptor write-back (WB) has happened. Without this
patch, that situation leads to a tx_timeout. To work around it, this
patch reschedules NAPI in that situation, provided NAPI is not already
scheduled. It also adds an ethtool counter, tx_lost_interrupt, to
track how often this case is detected.

Note: napi_reschedule() can safely be called from process/service_task
context; other drivers already do this without issue.

Change-ID: I00f98f1ce3774524d9421227652bef20fcbd0d20
Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent f734dfff
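
For reference, below is a minimal, self-contained C sketch of the detection logic this
patch adds. The struct and field names (model_ring, hw_head, the use of next_to_use as
the tail) are illustrative assumptions, not the driver's real data structures; the logic
mirrors the new i40e_get_tx_pending(ring, in_sw) semantics and the lost-interrupt check
in the diff that follows.

/*
 * Standalone sketch of the lost-interrupt detection (assumed names,
 * simplified ring model; not the actual i40e driver code).
 */
#include <stdbool.h>
#include <stdio.h>

struct model_ring {
        unsigned int count;         /* descriptors in the ring           */
        unsigned int next_to_use;   /* SW producer index (mirrors tail)  */
        unsigned int next_to_clean; /* SW consumer index                 */
        unsigned int hw_head;       /* head value the HW wrote back      */
};

/*
 * Pending descriptors, computed either from the HW head write-back
 * (in_sw == false) or from the SW copy next_to_clean (in_sw == true),
 * matching the new i40e_get_tx_pending(ring, in_sw) semantics.
 */
static unsigned int model_get_tx_pending(const struct model_ring *r, bool in_sw)
{
        unsigned int head = in_sw ? r->next_to_clean : r->hw_head;
        unsigned int tail = r->next_to_use;

        if (head != tail)
                return (head < tail) ? tail - head : tail + r->count - head;
        return 0;
}

/*
 * The condition the patch checks from the service task: HW has written
 * back everything (nothing pending from HW's view), SW still has
 * descriptors to clean, and the interrupt is not enabled -- i.e. the
 * write-back happened but the interrupt was lost.
 */
static bool model_lost_interrupt(const struct model_ring *r, bool int_enabled)
{
        return !model_get_tx_pending(r, false) &&
               model_get_tx_pending(r, true) &&
               !int_enabled;
}

int main(void)
{
        struct model_ring r = {
                .count = 512,
                .next_to_use = 100,
                .next_to_clean = 90,   /* SW is 10 descriptors behind */
                .hw_head = 100,        /* HW has already caught up    */
        };

        if (model_lost_interrupt(&r, false))
                printf("lost interrupt: reschedule NAPI, bump tx_lost_interrupt\n");
        return 0;
}

In the actual patch this check lives in i40e_detect_recover_hung_queue(), and
napi_reschedule(), which succeeds only if NAPI is not already scheduled, is what
increments tx_lost_interrupt.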
@@ -512,6 +512,7 @@ struct i40e_vsi {
         u32 tx_busy;
         u64 tx_linearize;
         u64 tx_force_wb;
+        u64 tx_lost_interrupt;
         u32 rx_buf_failed;
         u32 rx_page_failed;
...
@@ -89,6 +89,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
         I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
         I40E_VSI_STAT("tx_linearize", tx_linearize),
         I40E_VSI_STAT("tx_force_wb", tx_force_wb),
+        I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
         I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
         I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
 };
...
@@ -819,6 +819,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
         struct i40e_eth_stats *oes;
         struct i40e_eth_stats *es;     /* device's eth stats */
         u32 tx_restart, tx_busy;
+        u64 tx_lost_interrupt;
         struct i40e_ring *p;
         u32 rx_page, rx_buf;
         u64 bytes, packets;
@@ -844,6 +845,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
         rx_b = rx_p = 0;
         tx_b = tx_p = 0;
         tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+        tx_lost_interrupt = 0;
         rx_page = 0;
         rx_buf = 0;
         rcu_read_lock();
@@ -862,6 +864,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                 tx_busy += p->tx_stats.tx_busy;
                 tx_linearize += p->tx_stats.tx_linearize;
                 tx_force_wb += p->tx_stats.tx_force_wb;
+                tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

                 /* Rx queue is part of the same block as Tx queue */
                 p = &p[1];
@@ -880,6 +883,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
         vsi->tx_busy = tx_busy;
         vsi->tx_linearize = tx_linearize;
         vsi->tx_force_wb = tx_force_wb;
+        vsi->tx_lost_interrupt = tx_lost_interrupt;
         vsi->rx_page_failed = rx_page;
         vsi->rx_buf_failed = rx_buf;
@@ -4349,7 +4353,7 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 {
         struct i40e_ring *tx_ring = NULL;
         struct i40e_pf *pf;
-        u32 head, val, tx_pending;
+        u32 head, val, tx_pending_hw;
         int i;

         pf = vsi->back;
@@ -4375,16 +4379,9 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
         else
                 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

-        /* Bail out if interrupts are disabled because napi_poll
-         * execution in-progress or will get scheduled soon.
-         * napi_poll cleans TX and RX queues and updates 'next_to_clean'.
-         */
-        if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))
-                return;
-
         head = i40e_get_head(tx_ring);
-        tx_pending = i40e_get_tx_pending(tx_ring);
+        tx_pending_hw = i40e_get_tx_pending(tx_ring, false);

         /* HW is done executing descriptors, updated HEAD write back,
          * but SW hasn't processed those descriptors. If interrupt is
@@ -4392,12 +4389,12 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
          * dev_watchdog detecting timeout on those netdev_queue,
          * hence proactively trigger SW interrupt.
          */
-        if (tx_pending) {
+        if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
                 /* NAPI Poll didn't run and clear since it was set */
                 if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
                                        &tx_ring->q_vector->hung_detected)) {
-                        netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
-                                    vsi->seid, q_idx, tx_pending,
+                        netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
+                                    vsi->seid, q_idx, tx_pending_hw,
                                     tx_ring->next_to_clean, head,
                                     tx_ring->next_to_use,
                                     readl(tx_ring->tail));
@@ -4410,6 +4407,17 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
                                   &tx_ring->q_vector->hung_detected);
                 }
         }
+
+        /* This is the case where we have interrupts missing,
+         * so the tx_pending in HW will most likely be 0, but we
+         * will have tx_pending in SW since the WB happened but the
+         * interrupt got lost.
+         */
+        if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
+            (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
+                if (napi_reschedule(&tx_ring->q_vector->napi))
+                        tx_ring->tx_stats.tx_lost_interrupt++;
+        }
 }

 /**
...
@@ -610,15 +610,19 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
+ * @in_sw: is tx_pending being checked in SW or HW
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
-u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 {
         u32 head, tail;

-        head = i40e_get_head(ring);
+        if (!in_sw)
+                head = i40e_get_head(ring);
+        else
+                head = ring->next_to_clean;
         tail = readl(ring->tail);

         if (head != tail)
@@ -741,7 +745,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
          * them to be written back in case we stay in NAPI.
          * In this mode on X722 we do not enable Interrupt.
          */
-        j = i40e_get_tx_pending(tx_ring);
+        j = i40e_get_tx_pending(tx_ring, false);

         if (budget &&
             ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
...
@@ -203,6 +203,7 @@ struct i40e_tx_queue_stats {
         u64 tx_done_old;
         u64 tx_linearize;
         u64 tx_force_wb;
+        u64 tx_lost_interrupt;
 };

 struct i40e_rx_queue_stats {
@@ -338,7 +339,7 @@ int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                struct i40e_ring *tx_ring, u32 *flags);
 #endif
 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring);
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);

 /**
  * i40e_get_head - Retrieve head from head writeback
...
@@ -129,15 +129,19 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
 /**
  * i40evf_get_tx_pending - how many Tx descriptors not processed
  * @tx_ring: the ring of descriptors
+ * @in_sw: is tx_pending being checked in SW or HW
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
-u32 i40evf_get_tx_pending(struct i40e_ring *ring)
+u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 {
         u32 head, tail;

-        head = i40e_get_head(ring);
+        if (!in_sw)
+                head = i40e_get_head(ring);
+        else
+                head = ring->next_to_clean;
         tail = readl(ring->tail);

         if (head != tail)
@@ -259,7 +263,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
          * them to be written back in case we stay in NAPI.
          * In this mode on X722 we do not enable Interrupt.
          */
-        j = i40evf_get_tx_pending(tx_ring);
+        j = i40evf_get_tx_pending(tx_ring, false);

         if (budget &&
             ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
...
@@ -202,6 +202,7 @@ struct i40e_tx_queue_stats {
         u64 tx_done_old;
         u64 tx_linearize;
         u64 tx_force_wb;
+        u64 tx_lost_interrupt;
 };

 struct i40e_rx_queue_stats {
@@ -326,7 +327,7 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
 void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
 int i40evf_napi_poll(struct napi_struct *napi, int budget);
 void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40evf_get_tx_pending(struct i40e_ring *ring);
+u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);

 /**
  * i40e_get_head - Retrieve head from head writeback
...