Commit 1a8782e5 authored by Alexander Duyck, committed by Jeff Kirsher

fm10k: fold fm10k_pull_tail into fm10k_add_rx_frag

This change folds the fm10k_pull_tail call into fm10k_add_rx_frag. The
advantage of doing this is that the fragment does not have to be modified
after it is added to the skb.
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Tested-by: Krishneil Singh <Krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 89d256bb
@@ -269,16 +269,19 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
 			      struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
+	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 	unsigned int size = le16_to_cpu(rx_desc->w.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = FM10K_RX_BUFSZ;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
+	unsigned int pull_len;
 
-	if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+	if (unlikely(skb_is_nonlinear(skb)))
+		goto add_tail_frag;
 
+	if (likely(size <= FM10K_RX_HDR_LEN)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as-is */
@@ -290,8 +293,21 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
 		return false;
 	}
 
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	va += pull_len;
+	size -= pull_len;
+
+add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, truesize);
+			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
 	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
 }
@@ -517,44 +533,6 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
 
 	return true;
 }
 
-/**
- * fm10k_pull_tail - fm10k specific version of skb_pull_tail
- * @skb: pointer to current skb being adjusted
- *
- * This function is an fm10k specific version of __pskb_pull_tail.  The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void fm10k_pull_tail(struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-}
-
 /**
  * fm10k_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -580,10 +558,6 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
 		return true;
 	}
 
-	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
-		fm10k_pull_tail(skb);
-
 	/* if eth_skb_pad returns an error the skb was freed */
 	if (eth_skb_pad(skb))
 		return true;
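For context only, and not part of the commit: the sketch below is a minimal userspace model of the ordering the reworked fm10k_add_rx_frag uses. The header is copied into the linear area first, then va is advanced past it, so the fragment is attached with its final offset and length and never has to be trimmed afterwards. struct pkt, struct frag, MODEL_HDR_LEN and model_add_rx_frag are hypothetical stand-ins made up for illustration; they are not fm10k or kernel APIs.

#include <stdio.h>
#include <string.h>

#define MODEL_HDR_LEN 64u	/* hypothetical stand-in for FM10K_RX_HDR_LEN */

struct frag {
	const unsigned char *page;
	unsigned int offset;
	unsigned int len;
};

struct pkt {
	unsigned char linear[MODEL_HDR_LEN];	/* models the skb linear area */
	unsigned int linear_len;
	struct frag frag;			/* at most one fragment in this model */
	int has_frag;
};

/* Copy the header into the linear area first, then attach whatever is left
 * at an offset derived from the already-advanced pointer, so the fragment
 * is final the moment it is added.  (The real driver sizes the pull with
 * eth_get_headlen(); a plain cap at MODEL_HDR_LEN stands in for that.) */
static void model_add_rx_frag(struct pkt *p, const unsigned char *page,
			      unsigned int page_offset, unsigned int size)
{
	const unsigned char *va = page + page_offset;
	unsigned int pull_len = size < MODEL_HDR_LEN ? size : MODEL_HDR_LEN;

	memcpy(p->linear, va, pull_len);
	p->linear_len = pull_len;

	va += pull_len;		/* advance past the header ... */
	size -= pull_len;	/* ... and shrink what is left to attach */

	if (size) {
		p->frag.page = page;
		/* mirrors (unsigned long)va & ~PAGE_MASK for a one-page buffer */
		p->frag.offset = (unsigned int)(va - page);
		p->frag.len = size;
		p->has_frag = 1;
	}
}

int main(void)
{
	unsigned char page[256] = { 0 };
	struct pkt p = { .linear_len = 0, .has_frag = 0 };

	model_add_rx_frag(&p, page, 0, sizeof(page));
	printf("linear=%u frag_off=%u frag_len=%u\n", p.linear_len,
	       p.has_frag ? p.frag.offset : 0u, p.has_frag ? p.frag.len : 0u);
	return 0;
}

The pre-patch code did the opposite: skb_add_rx_frag() attached the buffer at rx_buffer->page_offset and fm10k_pull_tail() later shrank the fragment and moved its offset, which is the extra pass this commit removes.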