Commit 29896a67 authored by Giuseppe Cavallaro, committed by David S. Miller

stmmac: fix chained mode

This patch fixes the chain mode, which was broken and generated
a panic. It reworks the chain/ring modes so that they now share
the same structure (stmmac_mode_ops) and takes care of the
related pointers and callbacks.
Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d916701c
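
For readers skimming the diff below, here is a minimal standalone sketch (not part of the patch) of the pattern the change moves to: a single stmmac_mode_ops table shared by ring and chain mode, selected once at init time, with mode-specific hooks left NULL and probed by the callers. Apart from the stmmac_mode_ops idea itself, every type and helper name in the sketch is an illustrative stand-in, not the driver's real code.

/*
 * Illustrative sketch only -- not part of the patch. The dma_desc stub,
 * hw_stub, generic_is_jumbo, *_ops_example, etc. are stand-ins so the
 * example compiles on its own; the real tables live in chain_mode.c,
 * ring_mode.c and struct mac_device_info.
 */
#include <stdio.h>

struct dma_desc { unsigned int des2, des3; };	/* stand-in descriptor */

struct stmmac_mode_ops {
	unsigned int (*is_jumbo_frm)(int len, int enh_desc);
	int (*set_16kib_bfsize)(int mtu);	/* ring mode only, may be NULL */
	void (*refill_desc3)(void *priv, struct dma_desc *p);
};

static unsigned int generic_is_jumbo(int len, int enh_desc)
{
	return len > (enh_desc ? 8192 : 4096);	/* stand-in threshold */
}

static int ring_set_16kib_bfsize(int mtu)
{
	return mtu >= 8192 ? 16384 : 0;		/* stand-in sizing rule */
}

/* Chain mode simply leaves the ring-only hook NULL ... */
static const struct stmmac_mode_ops chain_ops_example = {
	.is_jumbo_frm = generic_is_jumbo,
};

/* ... while ring mode fills it in; both share one type. */
static const struct stmmac_mode_ops ring_ops_example = {
	.is_jumbo_frm     = generic_is_jumbo,
	.set_16kib_bfsize = ring_set_16kib_bfsize,
};

struct hw_stub { const struct stmmac_mode_ops *mode; };

int main(void)
{
	int chain_mode = 0;	/* module parameter in the real driver */
	struct hw_stub hw = {
		.mode = chain_mode ? &chain_ops_example : &ring_ops_example,
	};
	int bfsize = 0;

	/* Callers test the optional hook instead of testing the mode. */
	if (hw.mode->set_16kib_bfsize)
		bfsize = hw.mode->set_16kib_bfsize(9000);

	printf("jumbo=%u bfsize=%d\n",
	       hw.mode->is_jumbo_frm(9000, 1), bfsize);
	return 0;
}

With one table, common code such as stmmac_xmit() and init_dma_desc_rings() no longer branches on priv->mode to choose between priv->hw->ring and priv->hw->chain; it calls through priv->hw->mode and checks optional callbacks for NULL. Note how, in the old code shown in the diff, stmmac_tx_clean() and stmmac_rx_refill() called through priv->hw->ring unconditionally while stmmac_hw_init() only filled priv->hw->chain in chain mode, which is consistent with the NULL-pointer panic the commit message describes.
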
@@ -151,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
 		      sizeof(struct dma_desc)));
 }
 
-const struct stmmac_chain_mode_ops chain_mode_ops = {
+const struct stmmac_mode_ops chain_mode_ops = {
 	.init = stmmac_init_dma_chain,
 	.is_jumbo_frm = stmmac_is_jumbo_frm,
 	.jumbo_frm = stmmac_jumbo_frm,
...
@@ -419,20 +419,13 @@ struct mii_regs {
 	unsigned int data;	/* MII Data */
 };
 
-struct stmmac_ring_mode_ops {
-	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
-	unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
-	void (*refill_desc3) (void *priv, struct dma_desc *p);
-	void (*init_desc3) (struct dma_desc *p);
-	void (*clean_desc3) (void *priv, struct dma_desc *p);
-	int (*set_16kib_bfsize) (int mtu);
-};
-
-struct stmmac_chain_mode_ops {
+struct stmmac_mode_ops {
 	void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
 		      unsigned int extend_desc);
 	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
 	unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+	int (*set_16kib_bfsize)(int mtu);
+	void (*init_desc3)(struct dma_desc *p);
 	void (*refill_desc3) (void *priv, struct dma_desc *p);
 	void (*clean_desc3) (void *priv, struct dma_desc *p);
 };
@@ -441,8 +434,7 @@ struct mac_device_info {
 	const struct stmmac_ops *mac;
 	const struct stmmac_desc_ops *desc;
 	const struct stmmac_dma_ops *dma;
-	const struct stmmac_ring_mode_ops *ring;
-	const struct stmmac_chain_mode_ops *chain;
+	const struct stmmac_mode_ops *mode;
 	const struct stmmac_hwtimestamp *ptp;
 	struct mii_regs mii;	/* MII register Addresses */
 	struct mac_link link;
@@ -460,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
 void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_ring_mode_ops ring_mode_ops;
-extern const struct stmmac_chain_mode_ops chain_mode_ops;
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
 
 #endif /* __COMMON_H__ */
@@ -100,10 +100,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
 {
 	struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
 
-	if (unlikely(priv->plat->has_gmac))
-		/* Fill DES3 in case of RING mode */
-		if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
-			p->des3 = p->des2 + BUF_SIZE_8KiB;
+	/* Fill DES3 in case of RING mode */
+	if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+		p->des3 = p->des2 + BUF_SIZE_8KiB;
 }
 
 /* In ring mode we need to fill the desc3 because it is used as buffer */
@@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu)
 	return ret;
 }
 
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_mode_ops ring_mode_ops = {
 	.is_jumbo_frm = stmmac_is_jumbo_frm,
 	.jumbo_frm = stmmac_jumbo_frm,
 	.refill_desc3 = stmmac_refill_desc3,
...
@@ -966,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	p->des2 = priv->rx_skbuff_dma[i];
 
-	if ((priv->mode == STMMAC_RING_MODE) &&
+	if ((priv->hw->mode->init_desc3) &&
 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
-		priv->hw->ring->init_desc3(p);
+		priv->hw->mode->init_desc3(p);
 
 	return 0;
 }
@@ -999,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev)
 	unsigned int bfsize = 0;
 	int ret = -ENOMEM;
 
-	/* Set the max buffer size according to the DESC mode
-	 * and the MTU. Note that RING mode allows 16KiB bsize.
-	 */
-	if (priv->mode == STMMAC_RING_MODE)
-		bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+	if (priv->hw->mode->set_16kib_bfsize)
+		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
 
 	if (bfsize < BUF_SIZE_16KiB)
 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1044,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev)
 	/* Setup the chained descriptor addresses */
 	if (priv->mode == STMMAC_CHAIN_MODE) {
 		if (priv->extend_desc) {
-			priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
+			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
 					      rxsize, 1);
-			priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
+			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
 					      txsize, 1);
 		} else {
-			priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
+			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
 					      rxsize, 0);
-			priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
+			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
 					      txsize, 0);
 		}
 	}
@@ -1303,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 						 DMA_TO_DEVICE);
 			priv->tx_skbuff_dma[entry] = 0;
 		}
-		priv->hw->ring->clean_desc3(priv, p);
+		priv->hw->mode->clean_desc3(priv, p);
 
 		if (likely(skb != NULL)) {
 			dev_kfree_skb(skb);
@@ -1859,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	struct dma_desc *desc, *first;
 	unsigned int nopaged_len = skb_headlen(skb);
+	unsigned int enh_desc = priv->plat->enh_desc;
 
 	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
 		if (!netif_queue_stopped(dev)) {
@@ -1886,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	first = desc;
 
 	/* To program the descriptors according to the size of the frame */
-	if (priv->mode == STMMAC_RING_MODE) {
-		is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
-							priv->plat->enh_desc);
-		if (unlikely(is_jumbo))
-			entry = priv->hw->ring->jumbo_frm(priv, skb,
-							  csum_insertion);
-	} else {
-		is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
-							 priv->plat->enh_desc);
-		if (unlikely(is_jumbo))
-			entry = priv->hw->chain->jumbo_frm(priv, skb,
-							   csum_insertion);
-	}
+	if (enh_desc)
+		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
 
 	if (likely(!is_jumbo)) {
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
 		priv->tx_skbuff_dma[entry] = desc->des2;
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
 						csum_insertion, priv->mode);
-	} else
+	} else {
 		desc = first;
+		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+	}
 
 	for (i = 0; i < nfrags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2044,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			p->des2 = priv->rx_skbuff_dma[entry];
 
-			priv->hw->ring->refill_desc3(priv, p);
+			priv->hw->mode->refill_desc3(priv, p);
 
 			if (netif_msg_rx_status(priv))
 				pr_debug("\trefill entry #%d\n", entry);
@@ -2648,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	/* To use the chained or ring mode */
 	if (chain_mode) {
-		priv->hw->chain = &chain_mode_ops;
+		priv->hw->mode = &chain_mode_ops;
 		pr_info(" Chain mode enabled\n");
 		priv->mode = STMMAC_CHAIN_MODE;
 	} else {
-		priv->hw->ring = &ring_mode_ops;
+		priv->hw->mode = &ring_mode_ops;
 		pr_info(" Ring mode enabled\n");
 		priv->mode = STMMAC_RING_MODE;
 	}
...