Commit beb98814 authored by Siva Reddy Kallam, committed by Sasha Levin

tg3: Fix for tg3 transmit queue 0 timed out when too many gso_segs

[ Upstream commit b7d98729 ]

tg3_tso_bug() can hit a condition where the entire tx ring is not big
enough to segment the GSO packet. For example, if MSS is very small,
gso_segs can exceed the tx ring size. Hitting this condition causes a
tx timeout.
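
As a rough illustration (the numbers here are assumptions for scale, not
values taken from the patch): the default tg3 tx ring is 511 entries,
while a 64 KB GSO payload with an MSS of 88 bytes segments into roughly
745 packets, so the segments could never all be posted no matter how
empty the ring is.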

tg3_tso_bug() is called to handle TSO and DMA hardware bugs.
For TSO bugs, if tg3_tso_bug() cannot succeed, we have to drop the packet.
For DMA bugs, we can still fall back to linearize the SKB and let the
hardware transmit the TSO packet.

This patch adds a function tg3_tso_bug_gso_check() to check if there
are enough tx descriptors for GSO before calling tg3_tso_bug().
The caller will then handle the error appropriately - drop or
linearize the SKB.
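
Below is a minimal, self-contained userspace sketch of that guard, not
the driver code itself: the sketch_napi/sketch_skb types are hypothetical
stand-ins for tnapi->tx_pending and skb_shinfo(skb)->gso_segs, and the
ring size and segment counts reuse the illustrative numbers above.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver state the real check reads. */
struct sketch_napi { unsigned int tx_pending; };  /* mirrors tnapi->tx_pending */
struct sketch_skb  { unsigned int gso_segs; };    /* mirrors skb_shinfo(skb)->gso_segs */

/* Same shape as the new tg3_tso_bug_gso_check(): only allow the GSO
 * fallback when the segment count stays well below the ring size. */
static bool sketch_gso_check(const struct sketch_napi *tnapi,
			     const struct sketch_skb *skb)
{
	return skb->gso_segs < tnapi->tx_pending / 3;
}

int main(void)
{
	struct sketch_napi tnapi = { .tx_pending = 511 };  /* illustrative ring size */
	struct sketch_skb big = { .gso_segs = 745 };       /* 64 KB payload, MSS 88 */
	struct sketch_skb ok  = { .gso_segs = 45 };        /* comfortably small */

	printf("745 segs ok: %d\n", sketch_gso_check(&tnapi, &big)); /* 0 -> drop or linearize */
	printf(" 45 segs ok: %d\n", sketch_gso_check(&tnapi, &ok));  /* 1 -> safe to call tg3_tso_bug() */
	return 0;
}

The divide-by-3 presumably leaves headroom for the several descriptors
each segmented packet can consume beyond its payload data.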

v2: Corrected patch description to avoid confusion.
Signed-off-by: Siva Reddy Kallam <siva.kallam@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Acked-by: Prashant Sreedharan <prashant@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
parent 3faf4654
@@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 	return ret;
 }
 
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+	/* Check if we will never have enough descriptors,
+	 * as gso_segs can be more than current ring size
+	 */
+	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * vlan encapsulated.
 		 */
 		if (skb->protocol == htons(ETH_P_8021Q) ||
-		    skb->protocol == htons(ETH_P_8021AD))
-			return tg3_tso_bug(tp, tnapi, txq, skb);
+		    skb->protocol == htons(ETH_P_8021AD)) {
+			if (tg3_tso_bug_gso_check(tnapi, skb))
+				return tg3_tso_bug(tp, tnapi, txq, skb);
+			goto drop;
+		}
 
 		if (!skb_is_gso_v6(skb)) {
 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-			    tg3_flag(tp, TSO_BUG))
-				return tg3_tso_bug(tp, tnapi, txq, skb);
-
+			    tg3_flag(tp, TSO_BUG)) {
+				if (tg3_tso_bug_gso_check(tnapi, skb))
+					return tg3_tso_bug(tp, tnapi, txq, skb);
+				goto drop;
+			}
 			ip_csum = iph->check;
 			ip_tot_len = iph->tot_len;
 			iph->check = 0;
@@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (would_hit_hwbug) {
 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
-		if (mss) {
+		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
 			/* If it's a TSO packet, do GSO instead of
 			 * allocating and copying to a large linear SKB
 			 */