Commit c2837423 authored by Jose Abreu, committed by David S. Miller

net: stmmac: Rework TX Coalesce logic

The coalesce logic currently increments the packet count and sets the
IC bit only once the number of coalesced packets has passed a given
limit. This does not reflect what coalescing is meant for very well:
we can have a large burst of packets coalesced without an IC bit, and
then a single packet, sent later on, that ends up carrying it.

Rework the logic so that packets are coalesced only up to a limit and
the IC bit is set for large numbers of packets.
Signed-off-by: Jose Abreu <Jose.Abreu@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent da202451
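
Before the diff, it may help to see the reworked selection as a pure predicate. The following standalone C sketch is my restatement, not driver code: the function name, signature, and the small demo in main() are assumptions made for illustration; only the conditions in the ladder come from the patch below.

#include <stdbool.h>
#include <stdio.h>

static bool stmmac_should_set_ic(bool hw_tstamp, unsigned int tx_coal_frames,
                                 unsigned int tx_packets,
                                 unsigned int tx_count_frames)
{
        if (hw_tstamp)
                return true;    /* timestamped frames must raise an interrupt */
        if (!tx_coal_frames)
                return false;   /* coalescing disabled: leave it to the timer */
        if (tx_packets > tx_coal_frames)
                return true;    /* this one skb already exceeds the limit */
        /* IC when the running count has just crossed a multiple of the limit */
        return (tx_count_frames % tx_coal_frames) < tx_packets;
}

int main(void)
{
        /* a large TSO skb (30 descriptors) against a limit of 25: immediate IC */
        printf("%d\n", stmmac_should_set_ic(false, 25, 30, 30));
        /* a 1-descriptor straggler at a running count of 25: 25 % 25 == 0 < 1, IC */
        printf("%d\n", stmmac_should_set_ic(false, 25, 1, 25));
        /* a 1-descriptor straggler at a running count of 10: no IC */
        printf("%d\n", stmmac_should_set_ic(false, 25, 1, 10));
        return 0;
}

As in the diff, whenever the predicate is false the driver falls back to arming the mitigation timer via stmmac_tx_timer_arm(), so an interrupt still arrives eventually.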
@@ -2916,16 +2916,17 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	u32 queue = skb_get_queue_mapping(skb);
+	unsigned int first_entry, tx_packets;
+	int tmp_pay_len = 0, first_tx;
 	struct stmmac_tx_queue *tx_q;
-	unsigned int first_entry;
 	u8 proto_hdr_len, hdr;
-	int tmp_pay_len = 0;
+	bool has_vlan, set_ic;
 	u32 pay_len, mss;
 	dma_addr_t des;
-	bool has_vlan;
 	int i;
 
 	tx_q = &priv->tx_queue[queue];
+	first_tx = tx_q->cur_tx;
 
 	/* Compute header lengths */
 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
@@ -3033,16 +3034,27 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
 
 	/* Manage tx mitigation */
-	tx_q->tx_count_frames += nfrags + 1;
-	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
-	    !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-	      priv->hwts_tx_en)) {
-		stmmac_tx_timer_arm(priv, queue);
-	} else {
+	tx_packets = (tx_q->cur_tx + 1) - first_tx;
+	tx_q->tx_count_frames += tx_packets;
+
+	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+		set_ic = true;
+	else if (!priv->tx_coal_frames)
+		set_ic = false;
+	else if (tx_packets > priv->tx_coal_frames)
+		set_ic = true;
+	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+		set_ic = true;
+	else
+		set_ic = false;
+
+	if (set_ic) {
 		desc = &tx_q->dma_tx[tx_q->cur_tx];
 		tx_q->tx_count_frames = 0;
 		stmmac_set_tx_ic(priv, desc);
 		priv->xstats.tx_set_ic_bit++;
+	} else {
+		stmmac_tx_timer_arm(priv, queue);
 	}
 
 	/* We've used all descriptors we need for this skb, however,
@@ -3133,6 +3145,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	unsigned int first_entry, tx_packets, enh_desc;
 	struct stmmac_priv *priv = netdev_priv(dev);
 	unsigned int nopaged_len = skb_headlen(skb);
 	int i, csum_insertion = 0, is_jumbo = 0;
@@ -3141,13 +3154,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	int gso = skb_shinfo(skb)->gso_type;
 	struct dma_desc *desc, *first;
 	struct stmmac_tx_queue *tx_q;
-	unsigned int first_entry;
-	unsigned int enh_desc;
+	bool has_vlan, set_ic;
+	int entry, first_tx;
 	dma_addr_t des;
-	bool has_vlan;
-	int entry;
 
 	tx_q = &priv->tx_queue[queue];
+	first_tx = tx_q->cur_tx;
 
 	if (priv->tx_path_in_lpi_mode)
 		stmmac_disable_eee_mode(priv);
@@ -3241,12 +3253,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * This approach takes care about the fragments: desc is the first
 	 * element in case of no SG.
 	 */
-	tx_q->tx_count_frames += nfrags + 1;
-	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
-	    !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-	      priv->hwts_tx_en)) {
-		stmmac_tx_timer_arm(priv, queue);
-	} else {
+	tx_packets = (entry + 1) - first_tx;
+	tx_q->tx_count_frames += tx_packets;
+
+	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+		set_ic = true;
+	else if (!priv->tx_coal_frames)
+		set_ic = false;
+	else if (tx_packets > priv->tx_coal_frames)
+		set_ic = true;
+	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+		set_ic = true;
+	else
+		set_ic = false;
+
+	if (set_ic) {
 		if (likely(priv->extend_desc))
 			desc = &tx_q->dma_etx[entry].basic;
 		else
@@ -3255,6 +3276,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx_q->tx_count_frames = 0;
 		stmmac_set_tx_ic(priv, desc);
 		priv->xstats.tx_set_ic_bit++;
+	} else {
+		stmmac_tx_timer_arm(priv, queue);
 	}
 
 	/* We've used all descriptors we need for this skb, however,
...
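
To make the commit message's complaint concrete, here is a toy userspace comparison of where the IC bit lands under the old and the reworked scheme. This is an illustration under stated assumptions, not driver code: COAL stands in for priv->tx_coal_frames, the per-skb descriptor counts are invented, and the timestamp and disabled-coalescing branches are omitted for brevity.

#include <stdbool.h>
#include <stdio.h>

#define COAL 8U

static unsigned int old_count, new_count;

/* Old scheme: count nfrags + 1 per skb, IC once the counter passes COAL */
static bool old_ic(unsigned int nfrags)
{
        old_count += nfrags + 1;
        if (COAL > old_count)
                return false;
        old_count = 0;
        return true;
}

/* New scheme: count descriptors actually used; IC for an oversized skb,
 * or when the running count crosses a multiple of COAL */
static bool new_ic(unsigned int tx_packets)
{
        new_count += tx_packets;
        if (tx_packets > COAL || (new_count % COAL) < tx_packets) {
                new_count = 0;
                return true;
        }
        return false;
}

int main(void)
{
        /* one big TSO skb (2 frags, 20 descriptors), then six small skbs */
        unsigned int nfrags[7]     = { 2, 0, 0, 0, 0, 0, 0 };
        unsigned int tx_packets[7] = { 20, 1, 1, 1, 1, 1, 1 };

        for (int i = 0; i < 7; i++)
                printf("skb %d: old_ic=%d new_ic=%d\n", i,
                       old_ic(nfrags[i]), new_ic(tx_packets[i]));
        return 0;
}

Under the old scheme the 20-descriptor burst completes without an IC bit and the interrupt request lands on skb 5, a lone 1-descriptor straggler; under the new scheme the oversized skb itself requests the interrupt (tx_packets > COAL) and the stragglers are left to the mitigation timer.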