Commit 579a25a8 authored by Jose Abreu, committed by Jakub Kicinski

net: stmmac: Initial support for TBS

Adds the initial hooks for TBS support. This needs a 32 byte descriptor
in order for it to work with current HW. Adds all the logic for Enhanced
Descriptors in main core but no HW related logic for now.

Changes from v2:
- Use bitfield for TBS status / support (Jakub)
- Remove unneeded cache alignment (Jakub)
- Fix checkpatch issues
Signed-off-by: Jose Abreu <Jose.Abreu@synopsys.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent ab9837b5
...@@ -171,6 +171,15 @@ struct dma_extended_desc { ...@@ -171,6 +171,15 @@ struct dma_extended_desc {
__le32 des7; /* Tx/Rx Timestamp High */ __le32 des7; /* Tx/Rx Timestamp High */
}; };
/* Enhanced descriptor for TBS (Time-Based Scheduling).
 *
 * Prepends four extra descriptor words (des4..des7) to the basic DMA
 * descriptor. Per the commit message this yields the 32-byte descriptor
 * layout that current HW requires for TBS, which implies the embedded
 * basic descriptor is the usual 16 bytes (des0..des3).
 *
 * The set_tbs() desc op takes (sec, nsec) launch-time arguments, so the
 * leading words presumably encode the packet launch time — the exact
 * bit/word mapping is HW-specific; confirm against the DWMAC databook.
 */
struct dma_edesc {
__le32 des4; /* TBS word 0 — presumably launch time, verify vs databook */
__le32 des5; /* TBS word 1 */
__le32 des6; /* TBS word 2 */
__le32 des7; /* TBS word 3 */
struct dma_desc basic; /* standard descriptor; HW consumes it after the TBS words */
};
/* Transmit checksum insertion control */ /* Transmit checksum insertion control */
#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */ #define TX_CIC_FULL 3 /* Include IP header and pseudoheader */
......
...@@ -29,6 +29,7 @@ struct stmmac_extra_stats; ...@@ -29,6 +29,7 @@ struct stmmac_extra_stats;
struct stmmac_safety_stats; struct stmmac_safety_stats;
struct dma_desc; struct dma_desc;
struct dma_extended_desc; struct dma_extended_desc;
struct dma_edesc;
/* Descriptors helpers */ /* Descriptors helpers */
struct stmmac_desc_ops { struct stmmac_desc_ops {
...@@ -95,6 +96,7 @@ struct stmmac_desc_ops { ...@@ -95,6 +96,7 @@ struct stmmac_desc_ops {
void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag, void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
u32 inner_type); u32 inner_type);
void (*set_vlan)(struct dma_desc *p, u32 type); void (*set_vlan)(struct dma_desc *p, u32 type);
void (*set_tbs)(struct dma_edesc *p, u32 sec, u32 nsec);
}; };
#define stmmac_init_rx_desc(__priv, __args...) \ #define stmmac_init_rx_desc(__priv, __args...) \
...@@ -157,6 +159,8 @@ struct stmmac_desc_ops { ...@@ -157,6 +159,8 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args) stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
#define stmmac_set_desc_vlan(__priv, __args...) \ #define stmmac_set_desc_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_vlan, __args) stmmac_do_void_callback(__priv, desc, set_vlan, __args)
#define stmmac_set_desc_tbs(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_tbs, __args)
struct stmmac_dma_cfg; struct stmmac_dma_cfg;
struct dma_features; struct dma_features;
...@@ -210,6 +214,7 @@ struct stmmac_dma_ops { ...@@ -210,6 +214,7 @@ struct stmmac_dma_ops {
void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode); void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan); void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
int (*enable_tbs)(void __iomem *ioaddr, bool en, u32 chan);
}; };
#define stmmac_reset(__priv, __args...) \ #define stmmac_reset(__priv, __args...) \
...@@ -268,6 +273,8 @@ struct stmmac_dma_ops { ...@@ -268,6 +273,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_bfsize, __args) stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
#define stmmac_enable_sph(__priv, __args...) \ #define stmmac_enable_sph(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_sph, __args) stmmac_do_void_callback(__priv, dma, enable_sph, __args)
#define stmmac_enable_tbs(__priv, __args...) \
stmmac_do_callback(__priv, dma, enable_tbs, __args)
struct mac_device_info; struct mac_device_info;
struct net_device; struct net_device;
......
...@@ -39,13 +39,18 @@ struct stmmac_tx_info { ...@@ -39,13 +39,18 @@ struct stmmac_tx_info {
bool is_jumbo; bool is_jumbo;
}; };
#define STMMAC_TBS_AVAIL BIT(0)
#define STMMAC_TBS_EN BIT(1)
/* Frequently used values are kept adjacent for cache effect */ /* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue { struct stmmac_tx_queue {
u32 tx_count_frames; u32 tx_count_frames;
int tbs;
struct timer_list txtimer; struct timer_list txtimer;
u32 queue_index; u32 queue_index;
struct stmmac_priv *priv_data; struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
struct dma_edesc *dma_entx;
struct dma_desc *dma_tx; struct dma_desc *dma_tx;
struct sk_buff **tx_skbuff; struct sk_buff **tx_skbuff;
struct stmmac_tx_info *tx_skbuff_dma; struct stmmac_tx_info *tx_skbuff_dma;
......
...@@ -1090,6 +1090,8 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) ...@@ -1090,6 +1090,8 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
if (priv->extend_desc) if (priv->extend_desc)
head_tx = (void *)tx_q->dma_etx; head_tx = (void *)tx_q->dma_etx;
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
head_tx = (void *)tx_q->dma_entx;
else else
head_tx = (void *)tx_q->dma_tx; head_tx = (void *)tx_q->dma_tx;
...@@ -1163,13 +1165,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) ...@@ -1163,13 +1165,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
int i; int i;
/* Clear the TX descriptors */ /* Clear the TX descriptors */
for (i = 0; i < DMA_TX_SIZE; i++) for (i = 0; i < DMA_TX_SIZE; i++) {
int last = (i == (DMA_TX_SIZE - 1));
struct dma_desc *p;
if (priv->extend_desc) if (priv->extend_desc)
stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, p = &tx_q->dma_etx[i].basic;
priv->mode, (i == DMA_TX_SIZE - 1)); else if (tx_q->tbs & STMMAC_TBS_AVAIL)
p = &tx_q->dma_entx[i].basic;
else else
stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], p = &tx_q->dma_tx[i];
priv->mode, (i == DMA_TX_SIZE - 1));
stmmac_init_tx_desc(priv, p, priv->mode, last);
}
} }
/** /**
...@@ -1383,7 +1391,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev) ...@@ -1383,7 +1391,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
if (priv->extend_desc) if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx, stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 1); tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
else else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx, stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 0); tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
} }
...@@ -1392,6 +1400,8 @@ static int init_dma_tx_desc_rings(struct net_device *dev) ...@@ -1392,6 +1400,8 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
struct dma_desc *p; struct dma_desc *p;
if (priv->extend_desc) if (priv->extend_desc)
p = &((tx_q->dma_etx + i)->basic); p = &((tx_q->dma_etx + i)->basic);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
p = &((tx_q->dma_entx + i)->basic);
else else
p = tx_q->dma_tx + i; p = tx_q->dma_tx + i;
...@@ -1511,19 +1521,26 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv) ...@@ -1511,19 +1521,26 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
/* Free TX queue resources */ /* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++) { for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
size_t size;
void *addr;
/* Release the DMA TX socket buffers */ /* Release the DMA TX socket buffers */
dma_free_tx_skbufs(priv, queue); dma_free_tx_skbufs(priv, queue);
/* Free DMA regions of consistent memory previously allocated */ if (priv->extend_desc) {
if (!priv->extend_desc) size = sizeof(struct dma_extended_desc);
dma_free_coherent(priv->device, addr = tx_q->dma_etx;
DMA_TX_SIZE * sizeof(struct dma_desc), } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
tx_q->dma_tx, tx_q->dma_tx_phy); size = sizeof(struct dma_edesc);
else addr = tx_q->dma_entx;
dma_free_coherent(priv->device, DMA_TX_SIZE * } else {
sizeof(struct dma_extended_desc), size = sizeof(struct dma_desc);
tx_q->dma_etx, tx_q->dma_tx_phy); addr = tx_q->dma_tx;
}
size *= DMA_TX_SIZE;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
kfree(tx_q->tx_skbuff_dma); kfree(tx_q->tx_skbuff_dma);
kfree(tx_q->tx_skbuff); kfree(tx_q->tx_skbuff);
...@@ -1616,6 +1633,8 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) ...@@ -1616,6 +1633,8 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */ /* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) { for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
size_t size;
void *addr;
tx_q->queue_index = queue; tx_q->queue_index = queue;
tx_q->priv_data = priv; tx_q->priv_data = priv;
...@@ -1632,28 +1651,32 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) ...@@ -1632,28 +1651,32 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
if (!tx_q->tx_skbuff) if (!tx_q->tx_skbuff)
goto err_dma; goto err_dma;
if (priv->extend_desc) { if (priv->extend_desc)
tx_q->dma_etx = dma_alloc_coherent(priv->device, size = sizeof(struct dma_extended_desc);
DMA_TX_SIZE * sizeof(struct dma_extended_desc), else if (tx_q->tbs & STMMAC_TBS_AVAIL)
&tx_q->dma_tx_phy, size = sizeof(struct dma_edesc);
GFP_KERNEL); else
if (!tx_q->dma_etx) size = sizeof(struct dma_desc);
goto err_dma;
} else { size *= DMA_TX_SIZE;
tx_q->dma_tx = dma_alloc_coherent(priv->device,
DMA_TX_SIZE * sizeof(struct dma_desc), addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, &tx_q->dma_tx_phy, GFP_KERNEL);
GFP_KERNEL); if (!addr)
if (!tx_q->dma_tx) goto err_dma;
goto err_dma;
} if (priv->extend_desc)
tx_q->dma_etx = addr;
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
tx_q->dma_entx = addr;
else
tx_q->dma_tx = addr;
} }
return 0; return 0;
err_dma: err_dma:
free_dma_tx_desc_resources(priv); free_dma_tx_desc_resources(priv);
return ret; return ret;
} }
...@@ -1885,6 +1908,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) ...@@ -1885,6 +1908,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->extend_desc) if (priv->extend_desc)
p = (struct dma_desc *)(tx_q->dma_etx + entry); p = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
p = &tx_q->dma_entx[entry].basic;
else else
p = tx_q->dma_tx + entry; p = tx_q->dma_tx + entry;
...@@ -1983,19 +2008,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) ...@@ -1983,19 +2008,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{ {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int i;
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan); stmmac_stop_tx_dma(priv, chan);
dma_free_tx_skbufs(priv, chan); dma_free_tx_skbufs(priv, chan);
for (i = 0; i < DMA_TX_SIZE; i++) stmmac_clear_tx_descriptors(priv, chan);
if (priv->extend_desc)
stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
priv->mode, (i == DMA_TX_SIZE - 1));
else
stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
priv->mode, (i == DMA_TX_SIZE - 1));
tx_q->dirty_tx = 0; tx_q->dirty_tx = 0;
tx_q->cur_tx = 0; tx_q->cur_tx = 0;
tx_q->mss = 0; tx_q->mss = 0;
...@@ -2632,6 +2650,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) ...@@ -2632,6 +2650,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->dma_cap.vlins) if (priv->dma_cap.vlins)
stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
/* TBS */
for (chan = 0; chan < tx_cnt; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
}
/* Start the ball rolling... */ /* Start the ball rolling... */
stmmac_start_all_dma(priv); stmmac_start_all_dma(priv);
...@@ -2689,6 +2715,16 @@ static int stmmac_open(struct net_device *dev) ...@@ -2689,6 +2715,16 @@ static int stmmac_open(struct net_device *dev)
priv->rx_copybreak = STMMAC_RX_COPYBREAK; priv->rx_copybreak = STMMAC_RX_COPYBREAK;
/* Earlier check for TBS */
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
tx_q->tbs &= ~STMMAC_TBS_AVAIL;
}
ret = alloc_dma_desc_resources(priv); ret = alloc_dma_desc_resources(priv);
if (ret < 0) { if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
...@@ -2837,7 +2873,11 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, ...@@ -2837,7 +2873,11 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
tag = skb_vlan_tag_get(skb); tag = skb_vlan_tag_get(skb);
p = tx_q->dma_tx + tx_q->cur_tx; if (tx_q->tbs & STMMAC_TBS_AVAIL)
p = &tx_q->dma_entx[tx_q->cur_tx].basic;
else
p = &tx_q->dma_tx[tx_q->cur_tx];
if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
return false; return false;
...@@ -2872,7 +2912,11 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, ...@@ -2872,7 +2912,11 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
desc = tx_q->dma_tx + tx_q->cur_tx;
if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
else
desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len); curr_addr = des + (total_len - tmp_len);
if (priv->dma_cap.addr64 <= 32) if (priv->dma_cap.addr64 <= 32)
...@@ -2923,13 +2967,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2923,13 +2967,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct dma_desc *desc, *first, *mss_desc = NULL; struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
int desc_size, tmp_pay_len = 0, first_tx;
int nfrags = skb_shinfo(skb)->nr_frags; int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb); u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets; unsigned int first_entry, tx_packets;
int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q; struct stmmac_tx_queue *tx_q;
u8 proto_hdr_len, hdr;
bool has_vlan, set_ic; bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
u32 pay_len, mss; u32 pay_len, mss;
dma_addr_t des; dma_addr_t des;
int i; int i;
...@@ -2966,7 +3010,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2966,7 +3010,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* set new MSS value if needed */ /* set new MSS value if needed */
if (mss != tx_q->mss) { if (mss != tx_q->mss) {
mss_desc = tx_q->dma_tx + tx_q->cur_tx; if (tx_q->tbs & STMMAC_TBS_AVAIL)
mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
else
mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
stmmac_set_mss(priv, mss_desc, mss); stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss; tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
...@@ -2986,7 +3034,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2986,7 +3034,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
first_entry = tx_q->cur_tx; first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]); WARN_ON(tx_q->tx_skbuff[first_entry]);
desc = tx_q->dma_tx + first_entry; if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[first_entry].basic;
else
desc = &tx_q->dma_tx[first_entry];
first = desc; first = desc;
if (has_vlan) if (has_vlan)
...@@ -3058,7 +3109,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3058,7 +3109,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
set_ic = false; set_ic = false;
if (set_ic) { if (set_ic) {
desc = &tx_q->dma_tx[tx_q->cur_tx]; if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
else
desc = &tx_q->dma_tx[tx_q->cur_tx];
tx_q->tx_count_frames = 0; tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc); stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++; priv->xstats.tx_set_ic_bit++;
...@@ -3121,16 +3176,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3121,16 +3176,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
tx_q->cur_tx, first, nfrags); tx_q->cur_tx, first, nfrags);
stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
pr_info(">>> frame to be transmitted: "); pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb_headlen(skb)); print_pkt(skb->data, skb_headlen(skb));
} }
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc_size = sizeof(struct dma_edesc);
else
desc_size = sizeof(struct dma_desc);
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue); stmmac_tx_timer_arm(priv, queue);
...@@ -3160,10 +3217,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3160,10 +3217,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
u32 queue = skb_get_queue_mapping(skb); u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags; int nfrags = skb_shinfo(skb)->nr_frags;
int gso = skb_shinfo(skb)->gso_type; int gso = skb_shinfo(skb)->gso_type;
struct dma_edesc *tbs_desc = NULL;
int entry, desc_size, first_tx;
struct dma_desc *desc, *first; struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q; struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic; bool has_vlan, set_ic;
int entry, first_tx;
dma_addr_t des; dma_addr_t des;
tx_q = &priv->tx_queue[queue]; tx_q = &priv->tx_queue[queue];
...@@ -3203,6 +3261,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3203,6 +3261,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc)) if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry); desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[entry].basic;
else else
desc = tx_q->dma_tx + entry; desc = tx_q->dma_tx + entry;
...@@ -3232,6 +3292,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3232,6 +3292,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc)) if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry); desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[entry].basic;
else else
desc = tx_q->dma_tx + entry; desc = tx_q->dma_tx + entry;
...@@ -3278,6 +3340,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3278,6 +3340,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (set_ic) { if (set_ic) {
if (likely(priv->extend_desc)) if (likely(priv->extend_desc))
desc = &tx_q->dma_etx[entry].basic; desc = &tx_q->dma_etx[entry].basic;
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[entry].basic;
else else
desc = &tx_q->dma_tx[entry]; desc = &tx_q->dma_tx[entry];
...@@ -3295,20 +3359,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3295,20 +3359,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->cur_tx = entry; tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) { if (netif_msg_pktdata(priv)) {
void *tx_head;
netdev_dbg(priv->dev, netdev_dbg(priv->dev,
"%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
entry, first, nfrags); entry, first, nfrags);
if (priv->extend_desc)
tx_head = (void *)tx_q->dma_etx;
else
tx_head = (void *)tx_q->dma_tx;
stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
netdev_dbg(priv->dev, ">>> frame to be transmitted: "); netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len); print_pkt(skb->data, skb->len);
} }
...@@ -3354,12 +3409,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3354,12 +3409,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Prepare the first descriptor setting the OWN bit too */ /* Prepare the first descriptor setting the OWN bit too */
stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
csum_insertion, priv->mode, 1, last_segment, csum_insertion, priv->mode, 0, last_segment,
skb->len); skb->len);
} else {
stmmac_set_tx_owner(priv, first);
} }
if (tx_q->tbs & STMMAC_TBS_EN) {
struct timespec64 ts = ns_to_timespec64(skb->tstamp);
tbs_desc = &tx_q->dma_entx[first_entry];
stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
}
stmmac_set_tx_owner(priv, first);
/* The own bit must be the latest setting done when prepare the /* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that * descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine. * all is coherent before granting the DMA engine.
...@@ -3370,7 +3432,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3370,7 +3432,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_enable_dma_transmission(priv, priv->ioaddr); stmmac_enable_dma_transmission(priv, priv->ioaddr);
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); if (likely(priv->extend_desc))
desc_size = sizeof(struct dma_extended_desc);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc_size = sizeof(struct dma_edesc);
else
desc_size = sizeof(struct dma_desc);
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue); stmmac_tx_timer_arm(priv, queue);
...@@ -4193,7 +4262,7 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v) ...@@ -4193,7 +4262,7 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
seq_printf(seq, "Extended descriptor ring:\n"); seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx, sysfs_display_ring((void *)tx_q->dma_etx,
DMA_TX_SIZE, 1, seq); DMA_TX_SIZE, 1, seq);
} else { } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n"); seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx, sysfs_display_ring((void *)tx_q->dma_tx,
DMA_TX_SIZE, 0, seq); DMA_TX_SIZE, 0, seq);
......
...@@ -139,6 +139,7 @@ struct stmmac_txq_cfg { ...@@ -139,6 +139,7 @@ struct stmmac_txq_cfg {
u32 low_credit; u32 low_credit;
bool use_prio; bool use_prio;
u32 prio; u32 prio;
int tbs_en;
}; };
struct plat_stmmacenet_data { struct plat_stmmacenet_data {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment