Commit 40d0af56 authored by David S. Miller

Merge branch 'stmmac-Improvements-for-multi-queuing-and-for-AVB'

Jose Abreu says:

====================
net: stmmac: Improvements for multi-queuing and for AVB

Two improvements for stmmac: First one corrects the available fifo
size per queue, second one corrects enabling of AVB queues. More info
in commit log.

Cc: David S. Miller <davem@davemloft.net>
Cc: Joao Pinto <jpinto@synopsys.com>
Cc: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Cc: Alexandre Torgue <alexandre.torgue@st.com>

Changes from v1:
- Fix typo in second patch
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
parents 258bbb1b a0daae13
...@@ -442,8 +442,9 @@ struct stmmac_dma_ops { ...@@ -442,8 +442,9 @@ struct stmmac_dma_ops {
void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
int rxfifosz); int rxfifosz);
void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
int fifosz); int fifosz, u8 qmode);
void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel); void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
int fifosz, u8 qmode);
/* To track extra statistic (if supported) */ /* To track extra statistic (if supported) */
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr); void __iomem *ioaddr);
......
...@@ -225,6 +225,8 @@ enum power_event { ...@@ -225,6 +225,8 @@ enum power_event {
#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38) #define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
#define MTL_OP_MODE_RSF BIT(5) #define MTL_OP_MODE_RSF BIT(5)
#define MTL_OP_MODE_TXQEN_MASK GENMASK(3, 2)
#define MTL_OP_MODE_TXQEN_AV BIT(2)
#define MTL_OP_MODE_TXQEN BIT(3) #define MTL_OP_MODE_TXQEN BIT(3)
#define MTL_OP_MODE_TSF BIT(1) #define MTL_OP_MODE_TSF BIT(1)
......
...@@ -191,7 +191,7 @@ static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan) ...@@ -191,7 +191,7 @@ static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
} }
static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
u32 channel, int fifosz) u32 channel, int fifosz, u8 qmode)
{ {
unsigned int rqs = fifosz / 256 - 1; unsigned int rqs = fifosz / 256 - 1;
u32 mtl_rx_op, mtl_rx_int; u32 mtl_rx_op, mtl_rx_int;
...@@ -218,8 +218,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, ...@@ -218,8 +218,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK; mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT; mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
/* enable flow control only if each channel gets 4 KiB or more FIFO */ /* Enable flow control only if each channel gets 4 KiB or more FIFO and
if (fifosz >= 4096) { * only if channel is not an AVB channel.
*/
if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
unsigned int rfd, rfa; unsigned int rfd, rfa;
mtl_rx_op |= MTL_OP_MODE_EHFC; mtl_rx_op |= MTL_OP_MODE_EHFC;
...@@ -271,9 +273,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, ...@@ -271,9 +273,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
} }
static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
u32 channel) u32 channel, int fifosz, u8 qmode)
{ {
u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
unsigned int tqs = fifosz / 256 - 1;
if (mode == SF_DMA_MODE) { if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n"); pr_debug("GMAC: enable TX store and forward mode\n");
...@@ -306,12 +309,18 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, ...@@ -306,12 +309,18 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
* For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
* with reset values: TXQEN off, TQS 256 bytes. * with reset values: TXQEN off, TQS 256 bytes.
* *
* Write the bits in both cases, since it will have no effect when RO. * TXQEN must be written for multi-channel operation and TQS must
* For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might * reflect the available fifo size per queue (total fifo size / number
* be RO, however, writing the whole TQS field will result in a value * of enabled queues).
* equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
*/ */
mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK; mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
if (qmode != MTL_QUEUE_AVB)
mtl_tx_op |= MTL_OP_MODE_TXQEN;
else
mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
} }
......
...@@ -1750,12 +1750,20 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) ...@@ -1750,12 +1750,20 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 rx_channels_count = priv->plat->rx_queues_to_use;
u32 tx_channels_count = priv->plat->tx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use;
int rxfifosz = priv->plat->rx_fifo_size; int rxfifosz = priv->plat->rx_fifo_size;
int txfifosz = priv->plat->tx_fifo_size;
u32 txmode = 0; u32 txmode = 0;
u32 rxmode = 0; u32 rxmode = 0;
u32 chan = 0; u32 chan = 0;
u8 qmode = 0;
if (rxfifosz == 0) if (rxfifosz == 0)
rxfifosz = priv->dma_cap.rx_fifo_size; rxfifosz = priv->dma_cap.rx_fifo_size;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
/* Adjust for real per queue fifo size */
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
if (priv->plat->force_thresh_dma_mode) { if (priv->plat->force_thresh_dma_mode) {
txmode = tc; txmode = tc;
...@@ -1778,12 +1786,19 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) ...@@ -1778,12 +1786,19 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/* configure all channels */ /* configure all channels */
if (priv->synopsys_id >= DWMAC_CORE_4_00) { if (priv->synopsys_id >= DWMAC_CORE_4_00) {
for (chan = 0; chan < rx_channels_count; chan++) for (chan = 0; chan < rx_channels_count; chan++) {
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
rxfifosz); rxfifosz, qmode);
}
for (chan = 0; chan < tx_channels_count; chan++) for (chan = 0; chan < tx_channels_count; chan++) {
priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
txfifosz, qmode);
}
} else { } else {
priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
rxfifosz); rxfifosz);
...@@ -1946,15 +1961,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) ...@@ -1946,15 +1961,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
u32 rxmode, u32 chan) u32 rxmode, u32 chan)
{ {
u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
u32 rx_channels_count = priv->plat->rx_queues_to_use;
u32 tx_channels_count = priv->plat->tx_queues_to_use;
int rxfifosz = priv->plat->rx_fifo_size; int rxfifosz = priv->plat->rx_fifo_size;
int txfifosz = priv->plat->tx_fifo_size;
if (rxfifosz == 0) if (rxfifosz == 0)
rxfifosz = priv->dma_cap.rx_fifo_size; rxfifosz = priv->dma_cap.rx_fifo_size;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
/* Adjust for real per queue fifo size */
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
if (priv->synopsys_id >= DWMAC_CORE_4_00) { if (priv->synopsys_id >= DWMAC_CORE_4_00) {
priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
rxfifosz); rxfifosz, rxqmode);
priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
txfifosz, txqmode);
} else { } else {
priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
rxfifosz); rxfifosz);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment