Commit 7cdaa4cc authored by Tobias Waldekranz, committed by David S. Miller

net: ethernet: fec: prevent tx starvation under high rx load

In the ISR, we poll the event register for the queues in need of
service and then enter polled mode. After this point, the event
register will never be read again until we exit polled mode.

In a scenario where a UDP flow is routed back out through the same
interface, i.e. "router-on-a-stick", we'll typically only see an rx
queue event initially. Once we start to process the incoming flow,
we'll be locked in polled mode, but we'll never clean the tx rings
since that event is never caught.

Eventually the netdev watchdog will trip, causing all buffers to be
dropped, and then the process starts over again.

Rework the NAPI poll to keep trying to consume the entire budget as
long as new events are coming in, making sure to service all rx/tx
queues, in priority order, on each pass.
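
As an illustration of the new control flow, here is a minimal
userspace sketch (a model under stated assumptions, not the driver
code: collect_events(), rx(), tx() and napi_poll() are hypothetical
stand-ins for fec_enet_collect_events(), fec_enet_rx(), fec_enet_tx()
and fec_enet_rx_napi() in the diff below):

#include <stdbool.h>
#include <stdio.h>

static int pending_rx = 300;	/* packets waiting in the rx rings */

/* Stand-in for fec_enet_collect_events(): re-read and ack the event
 * register; true if any new rx/tx events were seen. */
static bool collect_events(void)
{
	return pending_rx > 0;
}

/* Stand-in for fec_enet_rx(): consume up to 'budget' packets. */
static int rx(int budget)
{
	int n = pending_rx < budget ? pending_rx : budget;

	pending_rx -= n;
	return n;
}

/* Stand-in for fec_enet_tx(): reclaim completed tx buffers. */
static void tx(void)
{
}

/* Stand-in for fec_enet_rx_napi(): tx reclaim now runs on every
 * pass, and we only stop once the budget is exhausted or a fresh
 * read of the event register shows no more work. */
static int napi_poll(int budget)
{
	int done = 0;

	do {
		done += rx(budget - done);
		tx();
	} while (done < budget && collect_events());

	return done;
}

int main(void)
{
	printf("done = %d\n", napi_poll(64));	/* prints "done = 64" */
	return 0;
}

Because tx reclaim runs on every pass and the event register is
re-read before deciding to stop, a sustained rx flood can no longer
starve the tx side.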

Fixes: 4d494cdc ("net: fec: change data structure to support multiqueue")
Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com>
Tested-by: Fugang Duan <fugang.duan@nxp.com>
Reviewed-by: Fugang Duan <fugang.duan@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 28b18e4e
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -525,11 +525,6 @@ struct fec_enet_private {
 	unsigned int total_tx_ring_size;
 	unsigned int total_rx_ring_size;
 
-	unsigned long work_tx;
-	unsigned long work_rx;
-	unsigned long work_ts;
-	unsigned long work_mdio;
-
 	struct platform_device *pdev;
 
 	int dev_id;
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -75,8 +75,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
 
 #define DRIVER_NAME	"fec"
 
-#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
-
 /* Pause frame feild and FIFO threshold */
 #define FEC_ENET_FCE	(1 << 5)
 #define FEC_ENET_RSEM_V	0x84
@@ -1248,8 +1246,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 	fep = netdev_priv(ndev);
 
-	queue_id = FEC_ENET_GET_QUQUE(queue_id);
-
 	txq = fep->tx_queue[queue_id];
 	/* get next bdp of dirty_tx */
 	nq = netdev_get_tx_queue(ndev, queue_id);
@@ -1340,17 +1336,14 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		writel(0, txq->bd.reg_desc_active);
 }
 
-static void
-fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	u16 queue_id;
-	/* First process class A queue, then Class B and Best Effort queue */
-	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
-		clear_bit(queue_id, &fep->work_tx);
-		fec_enet_tx_queue(ndev, queue_id);
-	}
-	return;
+	int i;
+
+	/* Make sure that AVB queues are processed first. */
+	for (i = fep->num_tx_queues - 1; i >= 0; i--)
+		fec_enet_tx_queue(ndev, i);
 }
 
 static int
@@ -1426,7 +1419,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 #ifdef CONFIG_M532x
 	flush_cache_all();
 #endif
-	queue_id = FEC_ENET_GET_QUQUE(queue_id);
 	rxq = fep->rx_queue[queue_id];
 
 	/* First, grab all of the stats for the incoming packet.
...@@ -1550,6 +1542,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) ...@@ -1550,6 +1542,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
htons(ETH_P_8021Q), htons(ETH_P_8021Q),
vlan_tag); vlan_tag);
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb); napi_gro_receive(&fep->napi, skb);
if (is_copybreak) { if (is_copybreak) {
@@ -1595,48 +1588,30 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	return pkt_received;
 }
 
-static int
-fec_enet_rx(struct net_device *ndev, int budget)
+static int fec_enet_rx(struct net_device *ndev, int budget)
 {
-	int	pkt_received = 0;
-	u16	queue_id;
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	int i, done = 0;
 
-	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-		int ret;
-
-		ret = fec_enet_rx_queue(ndev,
-					budget - pkt_received, queue_id);
-
-		if (ret < budget - pkt_received)
-			clear_bit(queue_id, &fep->work_rx);
-
-		pkt_received += ret;
-	}
-	return pkt_received;
+	/* Make sure that AVB queues are processed first. */
+	for (i = fep->num_rx_queues - 1; i >= 0; i--)
+		done += fec_enet_rx_queue(ndev, budget - done, i);
+
+	return done;
 }
 
-static bool
-fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+static bool fec_enet_collect_events(struct fec_enet_private *fep)
 {
-	if (int_events == 0)
-		return false;
+	uint int_events;
 
-	if (int_events & FEC_ENET_RXF_0)
-		fep->work_rx |= (1 << 2);
-	if (int_events & FEC_ENET_RXF_1)
-		fep->work_rx |= (1 << 0);
-	if (int_events & FEC_ENET_RXF_2)
-		fep->work_rx |= (1 << 1);
+	int_events = readl(fep->hwp + FEC_IEVENT);
 
-	if (int_events & FEC_ENET_TXF_0)
-		fep->work_tx |= (1 << 2);
-	if (int_events & FEC_ENET_TXF_1)
-		fep->work_tx |= (1 << 0);
-	if (int_events & FEC_ENET_TXF_2)
-		fep->work_tx |= (1 << 1);
+	/* Don't clear MDIO events, we poll for those */
+	int_events &= ~FEC_ENET_MII;
 
-	return true;
+	writel(int_events, fep->hwp + FEC_IEVENT);
+
+	return int_events != 0;
 }
 
 static irqreturn_t
@@ -1644,18 +1619,9 @@ fec_enet_interrupt(int irq, void *dev_id)
 {
 	struct net_device *ndev = dev_id;
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	uint int_events;
 	irqreturn_t ret = IRQ_NONE;
 
-	int_events = readl(fep->hwp + FEC_IEVENT);
-
-	/* Don't clear MDIO events, we poll for those */
-	int_events &= ~FEC_ENET_MII;
-
-	writel(int_events, fep->hwp + FEC_IEVENT);
-	fec_enet_collect_events(fep, int_events);
-
-	if ((fep->work_tx || fep->work_rx) && fep->link) {
+	if (fec_enet_collect_events(fep) && fep->link) {
 		ret = IRQ_HANDLED;
 
 		if (napi_schedule_prep(&fep->napi)) {
@@ -1672,17 +1638,19 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 {
 	struct net_device *ndev = napi->dev;
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int pkts;
+	int done = 0;
 
-	pkts = fec_enet_rx(ndev, budget);
-
-	fec_enet_tx(ndev);
+	do {
+		done += fec_enet_rx(ndev, budget - done);
+		fec_enet_tx(ndev);
+	} while ((done < budget) && fec_enet_collect_events(fep));
 
-	if (pkts < budget) {
-		napi_complete_done(napi, pkts);
+	if (done < budget) {
+		napi_complete_done(napi, done);
 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 	}
-	return pkts;
+
+	return done;
 }
 
 /* ------------------------------------------------------------------------- */