Commit 021bd5e3 authored by Jose Abreu, committed by David S. Miller

net: stmmac: Let TX and RX interrupts be independently enabled/disabled

With this mechanism we can get rid of the not-so-nice workaround of
scheduling TX NAPI whenever RX NAPI was scheduled. No bandwidth reduction was
seen with this change.

Changes from v1:
- Remove useless comment (Jakub)
- Do not bind the TX clean to NAPI budget (Jakub)
Signed-off-by: Jose Abreu <Jose.Abreu@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7d0b447a
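
Before the per-variant hunks below, here is a minimal standalone sketch (not part of the patch) of the pattern every enable/disable helper adopts: read the channel's interrupt-enable register, set or clear only the bits for the requested direction, and write the value back. The register and bit names INT_EN_REG, RX_IRQ_BITS and TX_IRQ_BITS are hypothetical placeholders, not identifiers from the driver.

/* Illustrative sketch only: independent RX/TX masking via read-modify-write
 * of a per-channel interrupt-enable register. INT_EN_REG, RX_IRQ_BITS and
 * TX_IRQ_BITS are hypothetical names, not taken from the driver.
 */
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define INT_EN_REG      0x00            /* hypothetical enable-register offset */
#define RX_IRQ_BITS     BIT(0)          /* hypothetical RX interrupt-enable bit */
#define TX_IRQ_BITS     BIT(1)          /* hypothetical TX interrupt-enable bit */

static void chan_set_irq(void __iomem *ioaddr, bool rx, bool tx, bool on)
{
        u32 value = readl(ioaddr + INT_EN_REG);

        if (rx)
                value = on ? (value | RX_IRQ_BITS) : (value & ~RX_IRQ_BITS);
        if (tx)
                value = on ? (value | TX_IRQ_BITS) : (value & ~TX_IRQ_BITS);

        writel(value, ioaddr + INT_EN_REG);
}

Each MAC variant touched below (sun8i, dwmac100/1000, dwmac4/4.10a, xgmac2) implements exactly this shape with its own register offsets and bit definitions.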
@@ -335,14 +335,30 @@ static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
         }
 }
 
-static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+                                       bool rx, bool tx)
 {
-        writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+        u32 value = readl(ioaddr + EMAC_INT_EN);
+
+        if (rx)
+                value |= EMAC_RX_INT;
+        if (tx)
+                value |= EMAC_TX_INT;
+
+        writel(value, ioaddr + EMAC_INT_EN);
 }
 
-static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+                                        bool rx, bool tx)
 {
-        writel(0, ioaddr + EMAC_INT_EN);
+        u32 value = readl(ioaddr + EMAC_INT_EN);
+
+        if (rx)
+                value &= ~EMAC_RX_INT;
+        if (tx)
+                value &= ~EMAC_TX_INT;
+
+        writel(value, ioaddr + EMAC_INT_EN);
 }
 
 static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
@@ -168,6 +168,8 @@
 /* DMA default interrupt mask for 4.00 */
 #define DMA_CHAN_INTR_DEFAULT_MASK      (DMA_CHAN_INTR_NORMAL | \
                                          DMA_CHAN_INTR_ABNORMAL)
+#define DMA_CHAN_INTR_DEFAULT_RX        (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX        (DMA_CHAN_INTR_ENA_TIE)
 
 #define DMA_CHAN_INTR_NORMAL_4_10       (DMA_CHAN_INTR_ENA_NIE_4_10 | \
                                          DMA_CHAN_INTR_ENA_RIE | \
@@ -178,6 +180,8 @@
 /* DMA default interrupt mask for 4.10a */
 #define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
                                          DMA_CHAN_INTR_ABNORMAL_4_10)
+#define DMA_CHAN_INTR_DEFAULT_RX_4_10   (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX_4_10   (DMA_CHAN_INTR_ENA_TIE)
 
 /* channel 0 specific fields */
 #define DMA_CHAN0_DBG_STAT_TPS          GENMASK(15, 12)
@@ -186,9 +190,10 @@
 #define DMA_CHAN0_DBG_STAT_RPS_SHIFT    8
 
 int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
 void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
 void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
 void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
@@ -97,21 +97,52 @@ void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
         writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
 }
 
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-        writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
-               DMA_CHAN_INTR_ENA(chan));
+        u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+        if (rx)
+                value |= DMA_CHAN_INTR_DEFAULT_RX;
+        if (tx)
+                value |= DMA_CHAN_INTR_DEFAULT_TX;
+
+        writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-        writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
-               ioaddr + DMA_CHAN_INTR_ENA(chan));
+        u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+        if (rx)
+                value |= DMA_CHAN_INTR_DEFAULT_RX_4_10;
+        if (tx)
+                value |= DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+        writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-        writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
+        u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+        if (rx)
+                value &= ~DMA_CHAN_INTR_DEFAULT_RX;
+        if (tx)
+                value &= ~DMA_CHAN_INTR_DEFAULT_TX;
+
+        writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+        u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+        if (rx)
+                value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10;
+        if (tx)
+                value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+        writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
 int dwmac4_dma_interrupt(void __iomem *ioaddr,
@@ -96,6 +96,8 @@
 
 /* DMA default interrupt mask */
 #define DMA_INTR_DEFAULT_MASK   (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+#define DMA_INTR_DEFAULT_RX     (DMA_INTR_ENA_RIE)
+#define DMA_INTR_DEFAULT_TX     (DMA_INTR_ENA_TIE)
 
 /* DMA Status register defines */
 #define DMA_STATUS_GLPII        0x40000000      /* GMAC LPI interrupt */
@@ -130,8 +132,8 @@
 #define NUM_DWMAC1000_DMA_REGS  23
 
 void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
 void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
@@ -37,14 +37,28 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
         writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
 }
 
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-        writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+        u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+        if (rx)
+                value |= DMA_INTR_DEFAULT_RX;
+        if (tx)
+                value |= DMA_INTR_DEFAULT_TX;
+
+        writel(value, ioaddr + DMA_INTR_ENA);
 }
 
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-        writel(0, ioaddr + DMA_INTR_ENA);
+        u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+        if (rx)
+                value &= ~DMA_INTR_DEFAULT_RX;
+        if (tx)
+                value &= ~DMA_INTR_DEFAULT_TX;
+
+        writel(value, ioaddr + DMA_INTR_ENA);
 }
 
 void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
@@ -361,6 +361,8 @@
 #define XGMAC_TIE                       BIT(0)
 #define XGMAC_DMA_INT_DEFAULT_EN        (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
                                          XGMAC_RIE | XGMAC_TIE)
+#define XGMAC_DMA_INT_DEFAULT_RX        (XGMAC_RBUE | XGMAC_RIE)
+#define XGMAC_DMA_INT_DEFAULT_TX        (XGMAC_TIE)
 #define XGMAC_DMA_CH_Rx_WATCHDOG(x)     (0x0000313c + (0x80 * (x)))
 #define XGMAC_RWT                       GENMASK(7, 0)
 #define XGMAC_DMA_CH_STATUS(x)          (0x00003160 + (0x80 * (x)))
@@ -248,14 +248,30 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
         writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
 }
 
-static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+                                    bool rx, bool tx)
 {
-        writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+        u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+        if (rx)
+                value |= XGMAC_DMA_INT_DEFAULT_RX;
+        if (tx)
+                value |= XGMAC_DMA_INT_DEFAULT_TX;
+
+        writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
 }
 
-static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+                                     bool rx, bool tx)
 {
-        writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+        u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+        if (rx)
+                value &= ~XGMAC_DMA_INT_DEFAULT_RX;
+        if (tx)
+                value &= ~XGMAC_DMA_INT_DEFAULT_TX;
+
+        writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
 }
 
 static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
@@ -187,8 +187,10 @@ struct stmmac_dma_ops {
         void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
                                    void __iomem *ioaddr);
         void (*enable_dma_transmission) (void __iomem *ioaddr);
-        void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
-        void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+        void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan,
+                               bool rx, bool tx);
+        void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan,
+                                bool rx, bool tx);
         void (*start_tx)(void __iomem *ioaddr, u32 chan);
         void (*stop_tx)(void __iomem *ioaddr, u32 chan);
         void (*start_rx)(void __iomem *ioaddr, u32 chan);
@@ -88,6 +88,7 @@ struct stmmac_channel {
         struct napi_struct rx_napi ____cacheline_aligned_in_smp;
         struct napi_struct tx_napi ____cacheline_aligned_in_smp;
         struct stmmac_priv *priv_data;
+        spinlock_t lock;
         u32 index;
 };
 
@@ -2069,17 +2069,25 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
                                                  &priv->xstats, chan);
         struct stmmac_channel *ch = &priv->channel[chan];
+        unsigned long flags;
 
         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
                 if (napi_schedule_prep(&ch->rx_napi)) {
-                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+                        spin_lock_irqsave(&ch->lock, flags);
+                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+                        spin_unlock_irqrestore(&ch->lock, flags);
                         __napi_schedule_irqoff(&ch->rx_napi);
-                        status |= handle_tx;
                 }
         }
 
-        if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
-                napi_schedule_irqoff(&ch->tx_napi);
+        if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+                if (napi_schedule_prep(&ch->tx_napi)) {
+                        spin_lock_irqsave(&ch->lock, flags);
+                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+                        spin_unlock_irqrestore(&ch->lock, flags);
+                        __napi_schedule_irqoff(&ch->tx_napi);
+                }
+        }
 
         return status;
 }
@@ -2274,14 +2282,14 @@ static void stmmac_tx_timer(struct timer_list *t)
 
         ch = &priv->channel[tx_q->queue_index];
 
-        /*
-         * If NAPI is already running we can miss some events. Let's rearm
-         * the timer and try again.
-         */
-        if (likely(napi_schedule_prep(&ch->tx_napi)))
+        if (likely(napi_schedule_prep(&ch->tx_napi))) {
+                unsigned long flags;
+
+                spin_lock_irqsave(&ch->lock, flags);
+                stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
+                spin_unlock_irqrestore(&ch->lock, flags);
                 __napi_schedule(&ch->tx_napi);
-        else
-                mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
+        }
 }
 
 /**
@@ -3751,8 +3759,14 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
         priv->xstats.napi_poll++;
 
         work_done = stmmac_rx(priv, budget, chan);
-        if (work_done < budget && napi_complete_done(napi, work_done))
-                stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+        if (work_done < budget && napi_complete_done(napi, work_done)) {
+                unsigned long flags;
+
+                spin_lock_irqsave(&ch->lock, flags);
+                stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+                spin_unlock_irqrestore(&ch->lock, flags);
+        }
+
         return work_done;
 }
 
@@ -3761,7 +3775,6 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
         struct stmmac_channel *ch =
                 container_of(napi, struct stmmac_channel, tx_napi);
         struct stmmac_priv *priv = ch->priv_data;
-        struct stmmac_tx_queue *tx_q;
         u32 chan = ch->index;
         int work_done;
 
@@ -3770,15 +3783,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
         work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
         work_done = min(work_done, budget);
 
-        if (work_done < budget)
-                napi_complete_done(napi, work_done);
+        if (work_done < budget && napi_complete_done(napi, work_done)) {
+                unsigned long flags;
 
-        /* Force transmission restart */
-        tx_q = &priv->tx_queue[chan];
-        if (tx_q->cur_tx != tx_q->dirty_tx) {
-                stmmac_enable_dma_transmission(priv, priv->ioaddr);
-                stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
-                                       chan);
+                spin_lock_irqsave(&ch->lock, flags);
+                stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+                spin_unlock_irqrestore(&ch->lock, flags);
         }
 
         return work_done;
@@ -4714,6 +4724,7 @@ int stmmac_dvr_probe(struct device *device,
 
         for (queue = 0; queue < maxq; queue++) {
                 struct stmmac_channel *ch = &priv->channel[queue];
 
+                spin_lock_init(&ch->lock);
                 ch->priv_data = priv;
                 ch->index = queue;
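
Taken together, the stmmac_main.c hunks pair each NAPI direction with its own mask/unmask, serialized by the new per-channel spinlock. As a reading aid, here is a condensed restatement of the RX direction exactly as it appears in the hunks above (fragments from the diff, not additional driver code); the TX direction is symmetric, passing (chan, 0, 1) instead.

/* IRQ path (stmmac_napi_check): mask only the RX interrupt, then schedule. */
if (napi_schedule_prep(&ch->rx_napi)) {
        spin_lock_irqsave(&ch->lock, flags);
        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
        spin_unlock_irqrestore(&ch->lock, flags);
        __napi_schedule_irqoff(&ch->rx_napi);
}

/* Poll path (stmmac_napi_poll_rx): once done, unmask only the RX interrupt. */
if (work_done < budget && napi_complete_done(napi, work_done)) {
        spin_lock_irqsave(&ch->lock, flags);
        stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
        spin_unlock_irqrestore(&ch->lock, flags);
}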