Commit 989316dd authored by Auke Kok, committed by Auke Kok

ixgb: revert an unwanted fix regarding tso/descriptors

There seemed to be another bug introduced, as well as a performance hit,
with the addition of the sentinel descriptor workaround.  Removal of this
workaround appears to prevent the hang.  We'll take the risk and remove it,
as we had never seen the originally reported bug under Linux.  (The removed
workaround is sketched below, after the commit metadata.)
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
parent 8556f0d1
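For context: the workaround being reverted shortened the final data chunk of a TSO packet by 4 bytes, so the mapping loop ran one extra time and emitted a tiny trailing "sentinel" descriptor. Below is a minimal userspace sketch of that effect, not driver code; MAX_DATA_PER_TXD, map_linear() and the 1448-byte example are illustrative stand-ins, not values from the ixgb sources.

/* Illustrative sketch only: mimics the shape of the ixgb_tx_map()
 * linear-data loop to show how the reverted "size -= 4" trim produced
 * an extra 4-byte trailing descriptor. */
#include <stdio.h>
#include <stdbool.h>

#define MAX_DATA_PER_TXD 8192	/* stand-in, not the real ixgb constant */

static unsigned int map_linear(unsigned int len, bool tso_sentinel)
{
	unsigned int size, count = 0;

	while (len) {
		size = len < MAX_DATA_PER_TXD ? len : MAX_DATA_PER_TXD;
		/* The trim this commit removes: shorten the final chunk by
		 * 4 bytes so the loop runs once more and a 4-byte sentinel
		 * descriptor trails the packet. */
		if (tso_sentinel && size == len && size > 8)
			size -= 4;
		printf("  descriptor %u: %u bytes\n", count, size);
		len -= size;
		count++;
	}
	return count;
}

int main(void)
{
	printf("without the workaround (after this commit):\n");
	map_linear(1448, false);	/* one 1448-byte descriptor */
	printf("with the workaround (before this commit):\n");
	map_linear(1448, true);		/* 1444 bytes + a 4-byte sentinel */
	return 0;
}

With the trim removed (this commit), the 1448-byte example maps to a single descriptor; with it in place (the reverted behaviour), it maps to a 1444-byte descriptor followed by a 4-byte sentinel.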
@@ -1295,7 +1295,6 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	struct ixgb_buffer *buffer_info;
 	int len = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
-	unsigned int mss = skb_shinfo(skb)->tso_size;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
@@ -1307,11 +1306,6 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	while(len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
-		/* Workaround for premature desc write-backs
-		 * in TSO mode. Append 4-byte sentinel desc */
-		if(unlikely(mss && !nr_frags && size == len && size > 8))
-			size -= 4;
 		buffer_info->length = size;
 		buffer_info->dma =
 			pci_map_single(adapter->pdev,
@@ -1337,12 +1331,6 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		while(len) {
 			buffer_info = &tx_ring->buffer_info[i];
 			size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
-			/* Workaround for premature desc write-backs
-			 * in TSO mode. Append 4-byte sentinel desc */
-			if(unlikely(mss && (f == (nr_frags-1)) && (size == len)
-				    && (size > 8)))
-				size -= 4;
 			buffer_info->length = size;
 			buffer_info->dma =
 				pci_map_page(adapter->pdev,
@@ -1421,8 +1409,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
 			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
 #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 \
-	/* one more for TSO workaround */ + 1
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
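The final hunk drops the descriptor that DESC_NEEDED reserved for that sentinel. As a rough worked example of the counting arithmetic, here is a standalone sketch of TXD_USE_COUNT and the new DESC_NEEDED; the values of IXGB_MAX_TXD_PWR, MAX_SKB_FRAGS and PAGE_SIZE are assumptions chosen for illustration, not taken from the ixgb or kernel headers.

/* Illustrative sketch of the descriptor-count arithmetic; constant
 * values below are assumed for the example. */
#include <stdio.h>

#define IXGB_MAX_TXD_PWR	14		/* assumed: 16 KB max data per descriptor */
#define IXGB_MAX_DATA_PER_TXD	(1 << IXGB_MAX_TXD_PWR)
#define MAX_SKB_FRAGS		18		/* assumed typical value for 4 KB pages */
#define PAGE_SIZE		4096

/* Same shape as the macro in the hunk above: one descriptor per full
 * IXGB_MAX_DATA_PER_TXD chunk, plus one for any remainder. */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			  (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
	/* New DESC_NEEDED: no "+ 1 for TSO workaround" term any more,
	 * since ixgb_tx_map() no longer emits the sentinel descriptor. */
	int desc_needed = TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) +
			  MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1;

	printf("TXD_USE_COUNT(%d) = %d\n", IXGB_MAX_DATA_PER_TXD,
	       TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD));		/* 1 */
	printf("TXD_USE_COUNT(%d) = %d\n", PAGE_SIZE,
	       TXD_USE_COUNT(PAGE_SIZE));			/* 1 */
	printf("DESC_NEEDED = %d\n", desc_needed);		/* 1 + 18*1 + 1 = 20 */
	return 0;
}

With these assumed values the macro reserves 1 + 18 + 1 = 20 descriptors for a worst-case skb; before this commit it reserved one more, matching the sentinel descriptor that ixgb_tx_map() could emit.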