Commit 0a8bd81f authored by Biao Huang, committed by David S. Miller

net: ethernet: mtk-star-emac: separate tx/rx handling with two NAPIs

The current driver may lose TX interrupts under a bidirectional iperf3
test, which leads to unexpected issues.

This patch lets the RX/TX interrupts be enabled and disabled separately,
and handles RX and TX in two different NAPIs.
Signed-off-by: Biao Huang <biao.huang@mediatek.com>
Signed-off-by: Yinghua Pan <ot_yinghua.pan@mediatek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0027340a
@@ -33,6 +33,7 @@
 #define MTK_STAR_SKB_ALIGNMENT		16
 #define MTK_STAR_HASHTABLE_MC_LIMIT	256
 #define MTK_STAR_HASHTABLE_SIZE_MAX	512
+#define MTK_STAR_DESC_NEEDED		(MAX_SKB_FRAGS + 4)
 
 /* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
  * work for this controller.
@@ -228,7 +229,8 @@ struct mtk_star_ring_desc_data {
 	struct sk_buff *skb;
 };
 
-#define MTK_STAR_RING_NUM_DESCS		128
+#define MTK_STAR_RING_NUM_DESCS		512
+#define MTK_STAR_TX_THRESH			(MTK_STAR_RING_NUM_DESCS / 4)
 #define MTK_STAR_NUM_TX_DESCS		MTK_STAR_RING_NUM_DESCS
 #define MTK_STAR_NUM_RX_DESCS		MTK_STAR_RING_NUM_DESCS
 #define MTK_STAR_NUM_DESCS_TOTAL	(MTK_STAR_RING_NUM_DESCS * 2)
@@ -263,7 +265,8 @@ struct mtk_star_priv {
 	struct mtk_star_ring rx_ring;
 
 	struct mii_bus *mii;
-	struct napi_struct napi;
+	struct napi_struct tx_napi;
+	struct napi_struct rx_napi;
 
 	struct device_node *phy_node;
 	phy_interface_t phy_intf;
@@ -379,19 +382,16 @@ mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
 	mtk_star_ring_push_head(ring, desc_data, flags);
 }
 
-static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
+static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
 {
-	return abs(ring->head - ring->tail);
-}
+	u32 avail;
 
-static bool mtk_star_ring_full(struct mtk_star_ring *ring)
-{
-	return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
-}
+	if (ring->tail > ring->head)
+		avail = ring->tail - ring->head - 1;
+	else
+		avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
 
-static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
-{
-	return mtk_star_ring_num_used_descs(ring) > 0;
-}
+	return avail;
+}
 
 static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
@@ -436,6 +436,36 @@ static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
 			  MTK_STAR_BIT_MAC_CFG_NIC_PD);
 }
 
+static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
+				    bool rx, bool tx)
+{
+	u32 value;
+
+	regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+	if (tx)
+		value &= ~MTK_STAR_BIT_INT_STS_TNTC;
+	if (rx)
+		value &= ~MTK_STAR_BIT_INT_STS_FNRC;
+
+	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
+static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
+				     bool rx, bool tx)
+{
+	u32 value;
+
+	regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+	if (tx)
+		value |= MTK_STAR_BIT_INT_STS_TNTC;
+	if (rx)
+		value |= MTK_STAR_BIT_INT_STS_FNRC;
+
+	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
 /* Unmask the three interrupts we care about, mask all others. */
 static void mtk_star_intr_enable(struct mtk_star_priv *priv)
 {
@@ -451,20 +481,11 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
 	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
 }
 
-static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
-{
-	unsigned int val;
-
-	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
-
-	return val;
-}
-
 static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
 {
 	unsigned int val;
 
-	val = mtk_star_intr_read(priv);
+	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
 	regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
 
 	return val;
@@ -736,25 +757,44 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
 	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
 }
 
-/* All processing for TX and RX happens in the napi poll callback.
- *
- * FIXME: The interrupt handling should be more fine-grained with each
- * interrupt enabled/disabled independently when needed. Unfortunatly this
- * turned out to impact the driver's stability and until we have something
- * working properly, we're disabling all interrupts during TX & RX processing
- * or when resetting the counter registers.
- */
+/**
+ * mtk_star_handle_irq - Interrupt Handler.
+ * @irq: interrupt number.
+ * @data: pointer to a network interface device structure.
+ * Description : this is the driver interrupt service routine.
+ * it mainly handles:
+ *  1. tx complete interrupt for frame transmission.
+ *  2. rx complete interrupt for frame reception.
+ *  3. MAC Management Counter interrupt to avoid counter overflow.
+ **/
 static irqreturn_t mtk_star_handle_irq(int irq, void *data)
 {
-	struct mtk_star_priv *priv;
-	struct net_device *ndev;
+	struct net_device *ndev = data;
+	struct mtk_star_priv *priv = netdev_priv(ndev);
+	unsigned int intr_status = mtk_star_intr_ack_all(priv);
+	bool rx, tx;
 
-	ndev = data;
-	priv = netdev_priv(ndev);
+	rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
+	     napi_schedule_prep(&priv->rx_napi);
+	tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
+	     napi_schedule_prep(&priv->tx_napi);
 
-	if (netif_running(ndev)) {
-		mtk_star_intr_disable(priv);
-		napi_schedule(&priv->napi);
+	if (rx || tx) {
+		spin_lock(&priv->lock);
+		/* mask Rx and TX Complete interrupt */
+		mtk_star_disable_dma_irq(priv, rx, tx);
+		spin_unlock(&priv->lock);
+
+		if (rx)
+			__napi_schedule(&priv->rx_napi);
+		if (tx)
+			__napi_schedule(&priv->tx_napi);
+	}
+
+	/* interrupt is triggered once any counters reach 0x8000000 */
+	if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
+		mtk_star_update_stats(priv);
+		mtk_star_reset_counters(priv);
 	}
 
 	return IRQ_HANDLED;
@@ -970,7 +1010,8 @@ static int mtk_star_enable(struct net_device *ndev)
 	if (ret)
 		goto err_free_skbs;
 
-	napi_enable(&priv->napi);
+	napi_enable(&priv->tx_napi);
+	napi_enable(&priv->rx_napi);
 	mtk_star_intr_ack_all(priv);
 	mtk_star_intr_enable(priv);
 
@@ -1003,7 +1044,8 @@ static void mtk_star_disable(struct net_device *ndev)
 	struct mtk_star_priv *priv = netdev_priv(ndev);
 
 	netif_stop_queue(ndev);
-	napi_disable(&priv->napi);
+	napi_disable(&priv->tx_napi);
+	napi_disable(&priv->rx_napi);
 	mtk_star_intr_disable(priv);
 	mtk_star_dma_disable(priv);
 	mtk_star_intr_ack_all(priv);
@@ -1035,13 +1077,45 @@ static int mtk_star_netdev_ioctl(struct net_device *ndev,
 	return phy_mii_ioctl(ndev->phydev, req, cmd);
 }
 
-static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
-				      struct net_device *ndev)
+static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+	netif_stop_queue(priv->ndev);
+
+	/* Might race with mtk_star_tx_poll, check again */
+	smp_mb();
+	if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
+		return -EBUSY;
+
+	netif_start_queue(priv->ndev);
+
+	return 0;
+}
+
+static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+	if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
+		return 0;
+
+	return __mtk_star_maybe_stop_tx(priv, size);
+}
+
+static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
+					      struct net_device *ndev)
 {
 	struct mtk_star_priv *priv = netdev_priv(ndev);
 	struct mtk_star_ring *ring = &priv->tx_ring;
 	struct device *dev = mtk_star_get_dev(priv);
 	struct mtk_star_ring_desc_data desc_data;
+	int nfrags = skb_shinfo(skb)->nr_frags;
+
+	if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
+		if (!netif_queue_stopped(ndev)) {
+			netif_stop_queue(ndev);
+			/* This is a hard error, log it. */
+			pr_err_ratelimited("Tx ring full when queue awake\n");
+		}
+		return NETDEV_TX_BUSY;
+	}
 
 	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
 	if (dma_mapping_error(dev, desc_data.dma_addr))
@@ -1049,17 +1123,11 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
 
 	desc_data.skb = skb;
 	desc_data.len = skb->len;
-
-	spin_lock_bh(&priv->lock);
-
 	mtk_star_ring_push_head_tx(ring, &desc_data);
 
 	netdev_sent_queue(ndev, skb->len);
 
-	if (mtk_star_ring_full(ring))
-		netif_stop_queue(ndev);
-
-	spin_unlock_bh(&priv->lock);
+	mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);
 
 	mtk_star_dma_resume_tx(priv);
 
@@ -1091,31 +1159,40 @@ static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
 	return ret;
 }
 
-static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
+static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
 {
+	struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
+						  tx_napi);
+	int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
 	struct mtk_star_ring *ring = &priv->tx_ring;
 	struct net_device *ndev = priv->ndev;
-	int ret, pkts_compl, bytes_compl;
-	bool wake = false;
+	unsigned int head = ring->head;
+	unsigned int entry = ring->tail;
 
-	spin_lock(&priv->lock);
-
-	for (pkts_compl = 0, bytes_compl = 0;;
-	     pkts_compl++, bytes_compl += ret, wake = true) {
-		if (!mtk_star_ring_descs_available(ring))
-			break;
-
+	while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
 		ret = mtk_star_tx_complete_one(priv);
 		if (ret < 0)
 			break;
+
+		count++;
+		pkts_compl++;
+		bytes_compl += ret;
+		entry = ring->tail;
 	}
 
 	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
 
-	if (wake && netif_queue_stopped(ndev))
+	if (unlikely(netif_queue_stopped(ndev)) &&
+	    (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
 		netif_wake_queue(ndev);
 
-	spin_unlock(&priv->lock);
+	if (napi_complete(napi)) {
+		spin_lock(&priv->lock);
+		mtk_star_enable_dma_irq(priv, false, true);
+		spin_unlock(&priv->lock);
+	}
+
+	return 0;
 }
 
 static void mtk_star_netdev_get_stats64(struct net_device *ndev,
@@ -1195,7 +1272,7 @@ static const struct ethtool_ops mtk_star_ethtool_ops = {
 	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
-static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
 {
 	struct mtk_star_ring *ring = &priv->rx_ring;
 	struct device *dev = mtk_star_get_dev(priv);
@@ -1203,107 +1280,85 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
 	struct net_device *ndev = priv->ndev;
 	struct sk_buff *curr_skb, *new_skb;
 	dma_addr_t new_dma_addr;
-	int ret;
+	int ret, count = 0;
 
-	spin_lock(&priv->lock);
-	ret = mtk_star_ring_pop_tail(ring, &desc_data);
-	spin_unlock(&priv->lock);
-	if (ret)
-		return -1;
+	while (count < budget) {
+		ret = mtk_star_ring_pop_tail(ring, &desc_data);
+		if (ret)
+			return -1;
 
-	curr_skb = desc_data.skb;
+		curr_skb = desc_data.skb;
 
-	if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
-	    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
-		/* Error packet -> drop and reuse skb. */
-		new_skb = curr_skb;
-		goto push_new_skb;
-	}
+		if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+		    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+			/* Error packet -> drop and reuse skb. */
+			new_skb = curr_skb;
+			goto push_new_skb;
+		}
 
-	/* Prepare new skb before receiving the current one. Reuse the current
-	 * skb if we fail at any point.
-	 */
-	new_skb = mtk_star_alloc_skb(ndev);
-	if (!new_skb) {
-		ndev->stats.rx_dropped++;
-		new_skb = curr_skb;
-		goto push_new_skb;
-	}
+		/* Prepare new skb before receiving the current one.
+		 * Reuse the current skb if we fail at any point.
+		 */
+		new_skb = mtk_star_alloc_skb(ndev);
+		if (!new_skb) {
+			ndev->stats.rx_dropped++;
+			new_skb = curr_skb;
+			goto push_new_skb;
+		}
 
-	new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
-	if (dma_mapping_error(dev, new_dma_addr)) {
-		ndev->stats.rx_dropped++;
-		dev_kfree_skb(new_skb);
-		new_skb = curr_skb;
-		netdev_err(ndev, "DMA mapping error of RX descriptor\n");
-		goto push_new_skb;
-	}
+		new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+		if (dma_mapping_error(dev, new_dma_addr)) {
+			ndev->stats.rx_dropped++;
+			dev_kfree_skb(new_skb);
+			new_skb = curr_skb;
+			netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+			goto push_new_skb;
+		}
 
-	/* We can't fail anymore at this point: it's safe to unmap the skb. */
-	mtk_star_dma_unmap_rx(priv, &desc_data);
+		/* We can't fail anymore at this point:
+		 * it's safe to unmap the skb.
+		 */
+		mtk_star_dma_unmap_rx(priv, &desc_data);
 
-	skb_put(desc_data.skb, desc_data.len);
-	desc_data.skb->ip_summed = CHECKSUM_NONE;
-	desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
-	desc_data.skb->dev = ndev;
-	netif_receive_skb(desc_data.skb);
+		skb_put(desc_data.skb, desc_data.len);
+		desc_data.skb->ip_summed = CHECKSUM_NONE;
+		desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+		desc_data.skb->dev = ndev;
+		netif_receive_skb(desc_data.skb);
 
-	/* update dma_addr for new skb */
-	desc_data.dma_addr = new_dma_addr;
+		/* update dma_addr for new skb */
+		desc_data.dma_addr = new_dma_addr;
 
 push_new_skb:
-	desc_data.len = skb_tailroom(new_skb);
-	desc_data.skb = new_skb;
 
-	spin_lock(&priv->lock);
-	mtk_star_ring_push_head_rx(ring, &desc_data);
-	spin_unlock(&priv->lock);
+		count++;
 
-	return 0;
-}
-
-static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
-{
-	int received, ret;
-
-	for (received = 0, ret = 0; received < budget && ret == 0; received++)
-		ret = mtk_star_receive_packet(priv);
+		desc_data.len = skb_tailroom(new_skb);
+		desc_data.skb = new_skb;
+		mtk_star_ring_push_head_rx(ring, &desc_data);
+	}
 
 	mtk_star_dma_resume_rx(priv);
 
-	return received;
+	return count;
 }
 
-static int mtk_star_poll(struct napi_struct *napi, int budget)
+static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
 {
 	struct mtk_star_priv *priv;
-	unsigned int status;
-	int received = 0;
-
-	priv = container_of(napi, struct mtk_star_priv, napi);
-
-	status = mtk_star_intr_read(priv);
-	mtk_star_intr_ack_all(priv);
+	int work_done = 0;
 
-	if (status & MTK_STAR_BIT_INT_STS_TNTC)
-		/* Clean-up all TX descriptors. */
-		mtk_star_tx_complete_all(priv);
+	priv = container_of(napi, struct mtk_star_priv, rx_napi);
 
-	if (status & MTK_STAR_BIT_INT_STS_FNRC)
-		/* Receive up to $budget packets. */
-		received = mtk_star_process_rx(priv, budget);
+	work_done = mtk_star_rx(priv, budget);
+	if (work_done < budget) {
+		napi_complete_done(napi, work_done);
+		spin_lock(&priv->lock);
+		mtk_star_enable_dma_irq(priv, true, false);
+		spin_unlock(&priv->lock);
+	}
 
-	if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
-		mtk_star_update_stats(priv);
-		mtk_star_reset_counters(priv);
-	}
-
-	if (received < budget)
-		napi_complete_done(napi, received);
-
-	mtk_star_intr_enable(priv);
-
-	return received;
+	return work_done;
 }
 
 static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
@@ -1602,7 +1657,10 @@ static int mtk_star_probe(struct platform_device *pdev)
 	ndev->netdev_ops = &mtk_star_netdev_ops;
 	ndev->ethtool_ops = &mtk_star_ethtool_ops;
 
-	netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT);
+	netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll,
+		       NAPI_POLL_WEIGHT);
+	netif_tx_napi_add(ndev, &priv->tx_napi, mtk_star_tx_poll,
+			  NAPI_POLL_WEIGHT);
 
 	return devm_register_netdev(dev, ndev);
 }