Commit 4c6de2fe authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/ppwaskie/net-next

Peter P Waskiewicz Jr says:

====================
This series contains multiple updates to the ixgbe driver.

The following are changes since commit 02644a17:
    sctp: fix bogus if statement in sctp_auth_recv_cid()

and are available in the git repository at:
  git://git.kernel.org/pub/scm/linux/kernel/git/ppwaskie/net-next master

Alexander Duyck (9):
  ixgbe: Remove code that was initializing Rx page offset
  ixgbe: combine ixgbe_add_rx_frag and ixgbe_can_reuse_page
  ixgbe: Only use double buffering if page size is less than 8K
  ixgbe: Have the CPU take ownership of the buffers sooner
  ixgbe: Make pull tail function separate from rest of cleanup_headers
  ixgbe: Copybreak sooner to avoid get_page/put_page and offset change
    overhead
  ixgbe: Make allocating skb and placing data in it a separate function
  ixgbe: Roll RSC code into non-EOP code
  ixgbe: Rewrite code related to configuring IFCS bit in Tx descriptor
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e6a04b1d 62748b7b
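
The central Rx change in the series (the double-buffering and page-reuse patches above) is that each receive buffer occupies half a page and the driver flips the buffer offset between the two halves whenever the page can be recycled. Below is a minimal sketch of that recycle decision, assuming PAGE_SIZE < 8192 (a 2K half page); the demo_* names are illustrative and not taken from the driver:

/*
 * Sketch only: mirrors the reuse test and offset flip from the diff
 * below, with illustrative names. Assumes PAGE_SIZE < 8192.
 */
#include <linux/mm.h>
#include <linux/topology.h>
#include <linux/types.h>

#define DEMO_RX_BUFSZ 2048			/* half of a 4K page */

struct demo_rx_buf {
	struct page *page;
	unsigned int page_offset;		/* 0 or DEMO_RX_BUFSZ */
};

/* Returns true if the page may be handed back to the ring for reuse. */
static bool demo_rx_buf_recycle(struct demo_rx_buf *buf)
{
	struct page *page = buf->page;

	/* avoid reusing pages allocated on a remote NUMA node */
	if (page_to_nid(page) != numa_node_id())
		return false;

	/* only recycle a page the driver exclusively owns */
	if (page_count(page) != 1)
		return false;

	/* flip to the other half so the next receive lands there */
	buf->page_offset ^= DEMO_RX_BUFSZ;
	return true;
}

In the diff itself the reusable half is re-posted to the ring by ixgbe_reuse_rx_page() rather than allocating a fresh page for every packet.
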
@@ -78,6 +78,9 @@
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
#define IXGBE_RXBUFFER_2K 2048
#define IXGBE_RXBUFFER_3K 3072
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
@@ -104,6 +107,7 @@
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -293,16 +297,25 @@ struct ixgbe_ring_feature {
* this is twice the size of a half page we need to double the page order
* for FCoE enabled Rx queues.
*/
#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
#ifdef IXGBE_FCOE
if (test_bit(__IXGBE_RX_FCOE, &ring->state))
return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
IXGBE_RXBUFFER_3K;
#endif
return IXGBE_RXBUFFER_2K;
}
#else
#define ixgbe_rx_pg_order(_ring) 0
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#ifdef IXGBE_FCOE
if (test_bit(__IXGBE_RX_FCOE, &ring->state))
return (PAGE_SIZE < 8192) ? 1 : 0;
#endif
return 0;
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
struct ixgbe_ring_container {
struct ixgbe_ring *ring; /* pointer to linked list of rings */
......
@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
}
bi->dma = dma;
bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
bi->page_offset = 0;
return true;
}
@@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
return max_len;
}
static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
__le32 rsc_enabled;
u32 rsc_cnt;
if (!ring_is_rsc_enabled(rx_ring))
return;
rsc_enabled = rx_desc->wb.lower.lo_dword.data &
cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
/* If this is an RSC frame rsc_cnt should be non-zero */
if (!rsc_enabled)
return;
rsc_cnt = le32_to_cpu(rsc_enabled);
rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
}
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
@@ -1440,15 +1417,27 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
prefetch(IXGBE_RX_DESC(rx_ring, ntc));
if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
return false;
/* update RSC append count if present */
if (ring_is_rsc_enabled(rx_ring)) {
__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
/* append_cnt indicates packet is RSC, if so fetch nextp */
if (IXGBE_CB(skb)->append_cnt) {
if (unlikely(rsc_enabled)) {
u32 rsc_cnt = le32_to_cpu(rsc_enabled);
rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
/* update ntc based on RSC value */
ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
ntc &= IXGBE_RXDADV_NEXTP_MASK;
ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
}
}
/* if we are the last buffer then there is nothing else to do */
if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
return false;
/* place skb in next buffer to be received */
rx_ring->rx_buffer_info[ntc].skb = skb;
@@ -1458,54 +1447,24 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
}
/**
* ixgbe_cleanup_headers - Correct corrupted or empty headers
* ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being fixed
*
* Check for corrupted packet headers caused by senders on the local L2
* embedded NIC switch not setting up their Tx Descriptors right. These
* should be very rare.
* @skb: pointer to current skb being adjusted
*
* Also address the case where we are pulling data in on pages only
* and as such no data is present in the skb header.
*
* In addition if skb is not at least 60 bytes we need to pad it so that
* it is large enough to qualify as a valid Ethernet frame.
*
* Returns true if an error was encountered and skb was freed.
**/
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
* This function is an ixgbe specific version of __pskb_pull_tail. The
* main difference between this version and the original function is that
* this function can make several assumptions about the state of things
* that allow for significant optimizations versus the standard function.
* As a result we can do things like drop a frag and maintain an accurate
* truesize for the skb.
*/
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
struct net_device *netdev = rx_ring->netdev;
unsigned char *va;
unsigned int pull_len;
/* if the page was released unmap it, else just sync our portion */
if (unlikely(IXGBE_CB(skb)->page_released)) {
dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
IXGBE_CB(skb)->page_released = false;
} else {
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
frag->page_offset,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
}
IXGBE_CB(skb)->dma = 0;
/* verify that the packet does not have any known errors */
if (unlikely(ixgbe_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
!(netdev->features & NETIF_F_RXALL))) {
dev_kfree_skb_any(skb);
return true;
}
/*
* it is valid to use page_address instead of kmap since we are
* working with pages allocated out of the lowmem pool per
@@ -1517,8 +1476,6 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = skb_frag_size(frag);
if (pull_len > IXGBE_RX_HDR_SIZE)
pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
@@ -1529,18 +1486,74 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
frag->page_offset += pull_len;
skb->data_len -= pull_len;
skb->tail += pull_len;
}
/*
* if we sucked the frag empty then we should free it,
* if there are other frags here something is screwed up in hardware
/**
* ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being updated
*
* This function provides a basic DMA sync up for the first fragment of an
* skb. The reason for doing this is that the first fragment cannot be
* unmapped until we have reached the end of packet descriptor for a buffer
* chain.
*/
if (skb_frag_size(frag) == 0) {
BUG_ON(skb_shinfo(skb)->nr_frags != 1);
skb_shinfo(skb)->nr_frags = 0;
__skb_frag_unref(frag);
skb->truesize -= ixgbe_rx_bufsz(rx_ring);
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
/* if the page was released unmap it, else just sync our portion */
if (unlikely(IXGBE_CB(skb)->page_released)) {
dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
IXGBE_CB(skb)->page_released = false;
} else {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
frag->page_offset,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
}
IXGBE_CB(skb)->dma = 0;
}
/**
* ixgbe_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being fixed
*
* Check for corrupted packet headers caused by senders on the local L2
* embedded NIC switch not setting up their Tx Descriptors right. These
* should be very rare.
*
* Also address the case where we are pulling data in on pages only
* and as such no data is present in the skb header.
*
* In addition if skb is not at least 60 bytes we need to pad it so that
* it is large enough to qualify as a valid Ethernet frame.
*
* Returns true if an error was encountered and skb was freed.
**/
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct net_device *netdev = rx_ring->netdev;
/* verify that the packet does not have any known errors */
if (unlikely(ixgbe_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
!(netdev->features & NETIF_F_RXALL))) {
dev_kfree_skb_any(skb);
return true;
}
/* place header in linear portion of buffer */
if (skb_is_nonlinear(skb))
ixgbe_pull_tail(rx_ring, skb);
#ifdef IXGBE_FCOE
/* do not attempt to pad FCoE Frames as this will disrupt DDP */
if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
@@ -1559,34 +1572,18 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
return false;
}
/**
* ixgbe_can_reuse_page - determine if we can reuse a page
* @rx_buffer: pointer to rx_buffer containing the page we want to reuse
*
* Returns true if page can be reused in another Rx buffer
**/
static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
{
struct page *page = rx_buffer->page;
/* if we are only owner of page and it is local we can reuse it */
return likely(page_count(page) == 1) &&
likely(page_to_nid(page) == numa_node_id());
}
/**
* ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused
*
* Syncronizes page for reuse by the adapter
* Synchronizes page for reuse by the adapter
**/
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *old_buff)
{
struct ixgbe_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
u16 bufsz = ixgbe_rx_bufsz(rx_ring);
new_buff = &rx_ring->rx_buffer_info[nta];
@@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
/* transfer page from old buffer to new buffer */
new_buff->page = old_buff->page;
new_buff->dma = old_buff->dma;
/* flip page offset to other buffer and store to new_buff */
new_buff->page_offset = old_buff->page_offset ^ bufsz;
new_buff->page_offset = old_buff->page_offset;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
new_buff->page_offset, bufsz,
new_buff->page_offset,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
/* bump ref count on page before it is given to the stack */
get_page(new_buff->page);
}
/**
@@ -1617,73 +1610,86 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
* @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
*
* This function is based on skb_add_rx_frag. I would have used that
* function however it doesn't handle the truesize case correctly since we
* are allocating more memory than might be used for a single receive.
* This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is
* less than the skb header size, otherwise it will just attach the page as
* a frag to the skb.
*
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter.
**/
static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
struct sk_buff *skb, int size)
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer->page, rx_buffer->page_offset,
size);
skb->len += size;
skb->data_len += size;
skb->truesize += ixgbe_rx_bufsz(rx_ring);
}
struct page *page = rx_buffer->page;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
ixgbe_rx_bufsz(rx_ring);
#endif
/**
* ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @q_vector: structure containing interrupt and ring information
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
*
* This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
* it by maintaining the mapping of the page to the system.
*
* Returns true if all work is completed without reaching budget
**/
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter = q_vector->adapter;
int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
unsigned char *va = page_address(page) + rx_buffer->page_offset;
do {
struct ixgbe_rx_buffer *rx_buffer;
union ixgbe_adv_rx_desc *rx_desc;
struct sk_buff *skb;
struct page *page;
u16 ntc;
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
/* we can reuse buffer as-is, just make sure it is local */
if (likely(page_to_nid(page) == numa_node_id()))
return true;
/* this page cannot be reused so discard it */
put_page(page);
return false;
}
ntc = rx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
rx_buffer = &rx_ring->rx_buffer_info[ntc];
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
rx_buffer->page_offset, size, truesize);
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
break;
/* avoid re-using remote pages */
if (unlikely(page_to_nid(page) != numa_node_id()))
return false;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely(page_count(page) != 1))
return false;
/* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
/*
* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* RXD_STAT_DD bit is set
* since we are the only owner of the page and we need to
* increment it, just set the value to 2 in order to avoid
* an unnecessary locked operation
*/
rmb();
atomic_set(&page->_count, 2);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > last_offset)
return false;
/* bump ref count on page before it is given to the stack */
get_page(page);
#endif
return true;
}
static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc)
{
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
struct page *page;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
@@ -1704,7 +1710,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
IXGBE_RX_HDR_SIZE);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
return NULL;
}
/*
@@ -1720,8 +1726,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
* after the writeback. Only unmap it when EOP is
* reached
*/
if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
goto dma_sync;
IXGBE_CB(skb)->dma = rx_buffer->dma;
} else {
if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
ixgbe_dma_sync_frag(rx_ring, skb);
dma_sync:
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
@@ -1731,10 +1744,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
/* pull page into skb */
ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
le16_to_cpu(rx_desc->wb.upper.length));
if (ixgbe_can_reuse_page(rx_buffer)) {
if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
@@ -1752,7 +1762,61 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_buffer->dma = 0;
rx_buffer->page = NULL;
ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
return skb;
}
/**
* ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @q_vector: structure containing interrupt and ring information
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
*
* This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
* it by maintaining the mapping of the page to the system.
*
* Returns true if all work is completed without reaching budget
**/
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter = q_vector->adapter;
int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
do {
union ixgbe_adv_rx_desc *rx_desc;
struct sk_buff *skb;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
break;
/*
* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* RXD_STAT_DD bit is set
*/
rmb();
/* retrieve a buffer from the ring */
skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
/* exit if we failed to retrieve a buffer */
if (!skb)
break;
cleaned_count++;
@@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
/* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
* total size of max desc * buf_len is not greater
* than 65536
*/
#if (PAGE_SIZE <= 8192)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (PAGE_SIZE <= 16384)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#else
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#endif
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
@@ -4129,27 +4183,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
}
/**
* ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
* @rx_ring: ring to setup
*
* On many IA platforms the L1 cache has a critical stride of 4K, this
* results in each receive buffer starting in the same cache set. To help
* reduce the pressure on this cache set we can interleave the offsets so
* that only every other buffer will be in the same cache set.
**/
static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
{
struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
u16 i;
for (i = 0; i < rx_ring->count; i += 2) {
rx_buffer[0].page_offset = 0;
rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
rx_buffer = &rx_buffer[2];
}
}
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -4195,8 +4228,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
memset(rx_ring->rx_buffer_info, 0, size);
ixgbe_init_rx_page_offset(rx_ring);
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
@@ -4646,8 +4677,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
ixgbe_init_rx_page_offset(rx_ring);
return 0;
err:
vfree(rx_ring->rx_buffer_info);
@@ -5874,9 +5903,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
if (unlikely(skb->no_fcs))
first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
return;
}
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
@@ -5938,7 +5970,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_IFCS |
IXGBE_ADVTXD_DCMD_DEXT);
/* set HW vlan bit if vlan is present */
@@ -5958,6 +5989,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
#endif
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
/* insert frame checksum */
if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
return cmd_type;
}
@@ -6063,8 +6098,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
if (likely(!data_len))
break;
if (unlikely(skb->no_fcs))
cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
i++;
......
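
The Tx-side change ("Rewrite code related to configuring IFCS bit in Tx descriptor") is spread across the last few hunks; condensed, it records the no-FCS request once as a per-packet flag and honours it when building the descriptor command type, instead of clearing IFCS on every descriptor inside the map loop. A rough consolidated sketch with illustrative helper names (the IXGBE_ADVTXD_* macros come from the driver's ixgbe_type.h):

/*
 * Sketch only: condenses the per-packet IFCS handling shown in the
 * hunks above. DEMO_TX_FLAGS_NO_IFCS mirrors IXGBE_TX_FLAGS_NO_IFCS.
 */
#include <linux/skbuff.h>

#define DEMO_TX_FLAGS_NO_IFCS	(1u << 9)

/* Record once, per packet, that the skb asked for no frame checksum. */
static u32 demo_record_fcs_request(const struct sk_buff *skb, u32 tx_flags)
{
	if (unlikely(skb->no_fcs))
		tx_flags |= DEMO_TX_FLAGS_NO_IFCS;
	return tx_flags;
}

/* Build the command type, inserting the frame checksum unless opted out. */
static __le32 demo_tx_cmd_type(u32 tx_flags)
{
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_DEXT);

	if (!(tx_flags & DEMO_TX_FLAGS_NO_IFCS))
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);

	return cmd_type;
}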