Commit 17daabb5 authored by Alan Brady, committed by Jeff Kirsher

i40e: Simplify i40e_detect_recover_hung_queue logic

This patch greatly reduces the unneeded complexity in the
i40e_detect_recover_hung_queue code path.  The previous implementation
set a 'hung bit' which would then get cleared while polling.  If the
detection routine was called a second time with the bit already set, we
would issue a software interrupt.  This patch makes it such that if
interrupts are disabled and we have pending TX descriptors, we trigger a
software interrupt immediately, since in the worst case the queues are
already clean and we merely take one extra interrupt.

Additionally, this patch removes the workaround for lost interrupts, as
calling napi_reschedule in this context can cause software interrupts to
fire on the wrong CPU.
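
In essence, the two-pass detection collapses to a single check per queue.
A condensed sketch of the new logic, distilled from the i40e_main.c hunk
below (the per-queue ring lookup and the MSI-X vs. legacy register
selection are elided here):

	/* If the vector's interrupt is masked (INTENA clear) but TX
	 * descriptors are still pending, fire the software interrupt
	 * right away instead of waiting for a second service-task pass.
	 */
	tx_pending = i40e_get_tx_pending(tx_ring);
	if (tx_pending && !(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))
		i40e_force_wb(vsi, tx_ring->q_vector);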

Change-ID: Iae108582a3ceb6229ed1d22e4ed6e69cf97aad8d
Signed-off-by: Alan Brady <alan.brady@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 373149fc
@@ -617,7 +617,6 @@ struct i40e_vsi {
 	u32 tx_busy;
 	u64 tx_linearize;
 	u64 tx_force_wb;
-	u64 tx_lost_interrupt;
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
@@ -703,9 +702,6 @@ struct i40e_q_vector {
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
-#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */
-	unsigned long hung_detected; /* Set/Reset for hung_detection logic */
-
 	cpumask_t affinity_mask;
 	struct irq_affinity_notify affinity_notify;
......
@@ -89,7 +89,6 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
 	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
 	I40E_VSI_STAT("tx_linearize", tx_linearize),
 	I40E_VSI_STAT("tx_force_wb", tx_force_wb),
-	I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
 	I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
 	I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
 };
......
@@ -737,7 +737,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	struct i40e_eth_stats *oes;
 	struct i40e_eth_stats *es;     /* device's eth stats */
 	u32 tx_restart, tx_busy;
-	u64 tx_lost_interrupt;
 	struct i40e_ring *p;
 	u32 rx_page, rx_buf;
 	u64 bytes, packets;
@@ -763,7 +762,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	rx_b = rx_p = 0;
 	tx_b = tx_p = 0;
 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
-	tx_lost_interrupt = 0;
 	rx_page = 0;
 	rx_buf = 0;
 	rcu_read_lock();
@@ -782,7 +780,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 		tx_busy += p->tx_stats.tx_busy;
 		tx_linearize += p->tx_stats.tx_linearize;
 		tx_force_wb += p->tx_stats.tx_force_wb;
-		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
 		/* Rx queue is part of the same block as Tx queue */
 		p = &p[1];
@@ -801,7 +798,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	vsi->tx_busy = tx_busy;
 	vsi->tx_linearize = tx_linearize;
 	vsi->tx_force_wb = tx_force_wb;
-	vsi->tx_lost_interrupt = tx_lost_interrupt;
 	vsi->rx_page_failed = rx_page;
 	vsi->rx_buf_failed = rx_buf;
@@ -4508,16 +4504,15 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
  * @vsi: Pointer to VSI struct
  *
  * This function checks specified queue for given VSI. Detects hung condition.
- * Sets hung bit since it is two step process. Before next run of service task
- * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
- * hung condition remain unchanged and during subsequent run, this function
- * issues SW interrupt to recover from hung condition.
+ * We proactively detect hung TX queues by checking if interrupts are disabled
+ * but there are pending descriptors.  If it appears hung, attempt to recover
+ * by triggering a SW interrupt.
  **/
 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 {
 	struct i40e_ring *tx_ring = NULL;
 	struct i40e_pf	*pf;
-	u32 head, val, tx_pending_hw;
+	u32 val, tx_pending;
 	int i;
 
 	pf = vsi->back;
@@ -4543,47 +4538,15 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 	else
 		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
-	head = i40e_get_head(tx_ring);
-
-	tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
-
-	/* HW is done executing descriptors, updated HEAD write back,
-	 * but SW hasn't processed those descriptors. If interrupt is
-	 * not generated from this point ON, it could result into
-	 * dev_watchdog detecting timeout on those netdev_queue,
-	 * hence proactively trigger SW interrupt.
+	tx_pending = i40e_get_tx_pending(tx_ring);
+
+	/* Interrupts are disabled and TX pending is non-zero,
+	 * trigger the SW interrupt (don't wait). Worst case
+	 * there will be one extra interrupt which may result
+	 * into not cleaning any queues because queues are cleaned.
 	 */
-	if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
-		/* NAPI Poll didn't run and clear since it was set */
-		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
-				       &tx_ring->q_vector->hung_detected)) {
-			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
-				    vsi->seid, q_idx, tx_pending_hw,
-				    tx_ring->next_to_clean, head,
-				    tx_ring->next_to_use,
-				    readl(tx_ring->tail));
-			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
-				    vsi->seid, q_idx, val);
-			i40e_force_wb(vsi, tx_ring->q_vector);
-		} else {
-			/* First Chance - detected possible hung */
-			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
-				&tx_ring->q_vector->hung_detected);
-		}
-	}
-
-	/* This is the case where we have interrupts missing,
-	 * so the tx_pending in HW will most likely be 0, but we
-	 * will have tx_pending in SW since the WB happened but the
-	 * interrupt got lost.
-	 */
-	if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
-	    (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
-		local_bh_disable();
-		if (napi_reschedule(&tx_ring->q_vector->napi))
-			tx_ring->tx_stats.tx_lost_interrupt++;
-		local_bh_enable();
-	}
+	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+		i40e_force_wb(vsi, tx_ring->q_vector);
 }
 
 /**
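
For context, i40e_force_wb() recovers the queue by requesting a software
interrupt on the vector.  A rough sketch of what the MSI-X path does
(reconstructed from memory of the driver, not part of this diff; the exact
flag set is an assumption, see i40e_txrx.c for the authoritative version):

	/* Sketch (assumed): set the SW-interrupt trigger bits in the
	 * vector's dynamic interrupt control register so the vector
	 * fires and NAPI runs to clean the ring.
	 */
	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
		  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |	/* no ITR update */
		  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
		  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;

	wr32(&vsi->back->hw,
	     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
	     val);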
......
@@ -711,19 +711,15 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
- * @in_sw: is tx_pending being checked in SW or HW
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
-u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
 	u32 head, tail;
 
-	if (!in_sw)
-		head = i40e_get_head(ring);
-	else
-		head = ring->next_to_clean;
+	head = i40e_get_head(ring);
 	tail = readl(ring->tail);
 
 	if (head != tail)
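
The hunk is cut off just before the return; the remainder of
i40e_get_tx_pending() computes the head-to-tail distance modulo the ring
size.  A sketch of that arithmetic, assuming the usual i40e implementation
(reconstructed, not shown in this diff):

	/* Pending descriptors = distance from head to tail, accounting
	 * for ring wrap-around (verify against i40e_txrx.c).
	 */
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;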
@@ -846,7 +842,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 	 * them to be written back in case we stay in NAPI.
 	 * In this mode on X722 we do not enable Interrupt.
 	 */
-	unsigned int j = i40e_get_tx_pending(tx_ring, false);
+	unsigned int j = i40e_get_tx_pending(tx_ring);
 
 	if (budget &&
 	    ((j / WB_STRIDE) == 0) && (j > 0) &&
@@ -2126,8 +2122,6 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 		return 0;
 	}
 
-	/* Clear hung_detected bit */
-	clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
-
 	/* Since the actual Tx work is minimal, we can give the Tx a larger
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
......
@@ -275,7 +275,6 @@ struct i40e_tx_queue_stats {
 	u64 tx_done_old;
 	u64 tx_linearize;
 	u64 tx_force_wb;
-	u64 tx_lost_interrupt;
 };
 
 struct i40e_rx_queue_stats {
@@ -400,7 +399,7 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring);
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40e_chk_linearize(struct sk_buff *skb);
......